
Commit a5f28ae

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2:
  ocfs2/cluster: Make o2net connect messages KERN_NOTICE
  ocfs2/dlm: Fix printing of lockname
  ocfs2: Fix contiguousness check in ocfs2_try_to_merge_extent_map()
  ocfs2/dlm: Remove BUG_ON in dlm recovery when freeing locks of a dead node
  ocfs2: Plugs race between the dc thread and an unlock ast message
  ocfs2: Remove overzealous BUG_ON during blocked lock processing
  ocfs2: Do not downconvert if the lock level is already compatible
  ocfs2: Prevent a livelock in dlmglue
  ocfs2: Fix setting of OCFS2_LOCK_BLOCKED during bast
  ocfs2: Use compat_ptr in reflink_arguments.
  ocfs2/dlm: Handle EAGAIN for compatibility - v2
  ocfs2: Add parenthesis to wrap the check for O_DIRECT.
  ocfs2: Only bug out when page size is larger than cluster size.
  ocfs2: Fix memory overflow in cow_by_page.
  ocfs2/dlm: Print more messages during lock migration
  ocfs2/dlm: Ignore LVBs of locks in the Blocked list
  ocfs2/trivial: Remove trailing whitespaces
  ocfs2: fix a misleading variable name
  ocfs2: Sync max_inline_data_with_xattr from tools.
  ocfs2: Fix refcnt leak on ocfs2_fast_follow_link() error path
2 parents: 8defcaa + 6efd806

28 files changed: +284 −129 lines

Note: several hunks below come from the "ocfs2/trivial: Remove trailing whitespaces" commit in this merge; in those hunks the removed and added lines differ only in invisible trailing whitespace.

fs/ocfs2/aops.c

Lines changed: 2 additions & 2 deletions
@@ -599,7 +599,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
 	return ret;
 }
 
-/*
+/*
  * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
  * particularly interested in the aio/dio case. Like the core uses
  * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
 
 	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
 					    inode->i_sb->s_bdev, iov, offset,
-					    nr_segs,
+					    nr_segs,
 					    ocfs2_direct_IO_get_blocks,
 					    ocfs2_dio_end_io);
 

fs/ocfs2/buffer_head_io.c

Lines changed: 1 addition & 1 deletion
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
 	}
 	ocfs2_metadata_cache_io_unlock(ci);
 
-	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
+	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
 	     (unsigned long long)block, nr,
 	     ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
 	     flags);

fs/ocfs2/cluster/heartbeat.c

Lines changed: 3 additions & 3 deletions
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
 
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 
-/* Only sets a new threshold if there are no active regions.
+/* Only sets a new threshold if there are no active regions.
 *
 * No locking or otherwise interesting code is required for reading
 * o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
 
 	mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
	     "milliseconds\n", reg->hr_dev_name,
-	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+	     jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
 	o2quo_disk_timeout();
 }
 
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
 	     "seq %llu last %llu changed %u equal %u\n",
 	     slot->ds_node_num, (long long)slot->ds_last_generation,
 	     le32_to_cpu(hb_block->hb_cksum),
-	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
+	     (unsigned long long)le64_to_cpu(hb_block->hb_seq),
 	     (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
 	     slot->ds_equal_samples);
 

fs/ocfs2/cluster/tcp.c

Lines changed: 5 additions & 5 deletions
@@ -485,15 +485,15 @@ static void o2net_set_nn_state(struct o2net_node *nn,
 	}
 
 	if (was_valid && !valid) {
-		printk(KERN_INFO "o2net: no longer connected to "
+		printk(KERN_NOTICE "o2net: no longer connected to "
 		       SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
 		o2net_complete_nodes_nsw(nn);
 	}
 
 	if (!was_valid && valid) {
 		o2quo_conn_up(o2net_num_from_nn(nn));
 		cancel_delayed_work(&nn->nn_connect_expired);
-		printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+		printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
 		       o2nm_this_node() > sc->sc_node->nd_num ?
 		       "connected to" : "accepted connection from",
 		       SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
 			cond_resched();
 			continue;
 		}
-		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+		mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
 		     " failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
 		o2net_ensure_shutdown(nn, sc, 0);
 		break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
 
 	do_gettimeofday(&now);
 
-	printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+	printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	       "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
 	       o2net_idle_timeout() / 1000,
 	       o2net_idle_timeout() % 1000);
 	mlog(ML_NOTICE, "here are some times that might help debug the "
 	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
 	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
+	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
 	     now.tv_sec, (long) now.tv_usec,
 	     sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
 	     sc->sc_tv_advance_start.tv_sec,
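The functional change in this file is the log level: connect, disconnect, and idle-timeout messages move from KERN_INFO to KERN_NOTICE, so cluster membership events are less likely to be filtered out by a console loglevel that suppresses informational messages. A minimal sketch of the distinction follows; the message text is illustrative, not the driver's exact strings:

#include <linux/kernel.h>	/* printk() and the KERN_* level prefixes */

static void example_log_connect(const char *node_name)
{
	/* KERN_NOTICE (level 5): a normal but significant condition.
	 * Cluster nodes connecting and disconnecting qualify. */
	printk(KERN_NOTICE "o2net: connected to %s\n", node_name);

	/* KERN_INFO (level 6): informational chatter, one level lower,
	 * and the first level a stricter console loglevel drops. */
	printk(KERN_INFO "o2net: handshake detail for %s\n", node_name);
}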

fs/ocfs2/cluster/tcp_internal.h

Lines changed: 2 additions & 2 deletions
@@ -32,10 +32,10 @@
 * on their number */
 #define O2NET_QUORUM_DELAY_MS	((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
 
-/*
+/*
 * This version number represents quite a lot, unfortunately. It not
 * only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
+ * locking semantics of the file system using the protocol. It should
 * be somewhere else, I'm sure, but right now it isn't.
 *
 * With version 11, we separate out the filesystem locking portion. The

fs/ocfs2/dlm/dlmapi.h

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
 		mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st)));	\
 	} while (0)
 
-#define DLM_LKSB_UNUSED1	0x01
+#define DLM_LKSB_UNUSED1	0x01
 #define DLM_LKSB_PUT_LVB	0x02
 #define DLM_LKSB_GET_LVB	0x04
 #define DLM_LKSB_UNUSED2	0x08

fs/ocfs2/dlm/dlmast.c

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 		dlm_lock_put(lock);
 		/* free up the reserved bast that we are cancelling.
 		 * guaranteed that this will not be the last reserved
-		 * ast because *both* an ast and a bast were reserved
+		 * ast because *both* an ast and a bast were reserved
 		 * to get to this point. the res->spinlock will not be
 		 * taken here */
 		dlm_lockres_release_ast(dlm, res);

fs/ocfs2/dlm/dlmconvert.c

Lines changed: 1 addition & 1 deletion
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
 			/* instead of logging the same network error over
 			 * and over, sleep here and wait for the heartbeat
 			 * to notice the node is dead. times out after 5s. */
-			dlm_wait_for_node_death(dlm, res->owner,
+			dlm_wait_for_node_death(dlm, res->owner,
 						DLM_NODE_DEATH_WAIT_MAX);
 			ret = DLM_RECOVERING;
 			mlog(0, "node %u died so returning DLM_RECOVERING "

fs/ocfs2/dlm/dlmdebug.c

Lines changed: 1 addition & 1 deletion
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
 	assert_spin_locked(&res->spinlock);
 
 	stringify_lockname(res->lockname.name, res->lockname.len,
-			   buf, sizeof(buf) - 1);
+			   buf, sizeof(buf));
 	printk("lockres: %s, owner=%u, state=%u\n",
 	       buf, res->owner, res->state);
 	printk(" last used: %lu, refcnt: %u, on purge list: %s\n",

fs/ocfs2/dlm/dlmdomain.c

Lines changed: 1 addition & 1 deletion
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
 	}
 
 	/* Once the dlm ctxt is marked as leaving then we don't want
-	 * to be put in someone's domain map.
+	 * to be put in someone's domain map.
 	 * Also, explicitly disallow joining at certain troublesome
 	 * times (ie. during recovery). */
 	if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {

fs/ocfs2/dlm/dlmlock.c

Lines changed: 1 addition & 1 deletion
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 		}
 		dlm_revert_pending_lock(res, lock);
 		dlm_lock_put(lock);
-	} else if (dlm_is_recovery_lock(res->lockname.name,
+	} else if (dlm_is_recovery_lock(res->lockname.name,
 					res->lockname.len)) {
 		/* special case for the $RECOVERY lock.
 		 * there will never be an AST delivered to put

fs/ocfs2/dlm/dlmmaster.c

Lines changed: 19 additions & 19 deletions
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
 	struct dlm_master_list_entry *mle;
 
 	assert_spin_locked(&dlm->spinlock);
-	
+
 	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
 		if (node_up)
 			dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 	__dlm_insert_mle(dlm, mle);
 
 	/* still holding the dlm spinlock, check the recovery map
-	 * to see if there are any nodes that still need to be
+	 * to see if there are any nodes that still need to be
 	 * considered. these will not appear in the mle nodemap
 	 * but they might own this lockres. wait on them. */
 	bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 				msleep(500);
 			}
 			continue;
-		}
+		}
 
 		dlm_kick_recovery_thread(dlm);
 		msleep(1000);
@@ -939,8 +939,8 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
 		     res->lockname.name, blocked);
 		if (++tries > 20) {
 			mlog(ML_ERROR, "%s:%.*s: spinning on "
-			     "dlm_wait_for_lock_mastery, blocked=%d\n",
-			     dlm->name, res->lockname.len,
+			     "dlm_wait_for_lock_mastery, blocked=%d\n",
+			     dlm->name, res->lockname.len,
 			     res->lockname.name, blocked);
 			dlm_print_one_lock_resource(res);
 			dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
 		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
 		b = (mle->type == DLM_MLE_BLOCK);
 		if ((*blocked && !b) || (!*blocked && b)) {
-			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
 			     dlm->name, res->lockname.len, res->lockname.name,
 			     *blocked, b);
 			*blocked = b;
@@ -1602,7 +1602,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
 		}
 		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
 		     dlm->node_num, res->lockname.len, res->lockname.name);
-		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
+		ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
 						 DLM_ASSERT_MASTER_MLE_CLEANUP);
 		if (ret < 0) {
 			mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ static int dlm_do_assert_master(struct dlm_ctxt *dlm,
 
 		if (r & DLM_ASSERT_RESPONSE_REASSERT) {
 			mlog(0, "%.*s: node %u create mles on other "
-			     "nodes and requests a re-assert\n",
+			     "nodes and requests a re-assert\n",
 			     namelen, lockname, to);
 			reassert = 1;
 		}
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 				spin_unlock(&dlm->master_lock);
 				spin_unlock(&dlm->spinlock);
 				goto done;
-			}
+			}
 		}
 	}
 	spin_unlock(&dlm->master_lock);
@@ -1883,15 +1883,15 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 		int extra_ref = 0;
 		int nn = -1;
 		int rr, err = 0;
-		
+
 		spin_lock(&mle->spinlock);
 		if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
 			extra_ref = 1;
 		else {
 			/* MASTER mle: if any bits set in the response map
 			 * then the calling node needs to re-assert to clear
 			 * up nodes that this node contacted */
-			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
+			while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
 						    nn+1)) < O2NM_MAX_NODES) {
 				if (nn != dlm->node_num && nn != assert->node_idx)
 					master_request = 1;
@@ -2002,7 +2002,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
 	__dlm_print_one_lock_resource(res);
 	spin_unlock(&res->spinlock);
 	spin_unlock(&dlm->spinlock);
-	*ret_data = (void *)res;
+	*ret_data = (void *)res;
 	dlm_put(dlm);
 	return -EINVAL;
 }
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
 	item->u.am.request_from = request_from;
 	item->u.am.flags = flags;
 
-	if (ignore_higher)
-		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+	if (ignore_higher)
+		mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
 		     res->lockname.name);
-		
+
 	spin_lock(&dlm->work_lock);
 	list_add_tail(&item->list, &dlm->work_list);
 	spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
 * think that $RECOVERY is currently mastered by a dead node. If so,
 * we wait a short time to allow that node to get notified by its own
 * heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is
+ * mastered by dead nodes are purged when the hearbeat callback is
 * fired, so we can know for sure that it is safe to continue once
 * the node returns a live node or no node. */
 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
 			ret = -EAGAIN;
 		}
 		spin_unlock(&dlm->spinlock);
-		mlog(0, "%s: reco lock master is %u\n", dlm->name,
+		mlog(0, "%s: reco lock master is %u\n", dlm->name,
 		     master);
 		break;
 	}
@@ -2602,7 +2602,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
 
 		mlog(0, "%s:%.*s: timed out during migration\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
-		/* avoid hang during shutdown when migrating lockres
+		/* avoid hang during shutdown when migrating lockres
 		 * to a node which also goes down */
 		if (dlm_is_node_dead(dlm, target)) {
 			mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
 	can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
 	spin_unlock(&res->spinlock);
 
-	/* target has died, so make the caller break out of the
+	/* target has died, so make the caller break out of the
 	 * wait_event, but caller must recheck the domain_map */
 	spin_lock(&dlm->spinlock);
 	if (!test_bit(mig_target, dlm->domain_map))
