#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/socket.h>
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
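/*
 * Note (summary, not verbatim from this file): with this prefix, every
 * mlog() call below is tagged with the ML_DLM and ML_DLM_MASTER mask bits
 * by default, so mastery/migration tracing can be enabled or filtered
 * through the usual masklog filtering mechanism.
 */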
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
static void dlm_mle_release(struct kref *kref);
			 unsigned int namelen);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			char *name, unsigned int namelen);
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
	__dlm_mle_detach_hb_events(dlm, mle);
	kref_put(&mle->mle_refs, dlm_mle_release);
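	/*
	 * Reference-counting sketch (summary, not verbatim from this file):
	 * each master list entry is kref-counted.  The put paths end in a
	 * kref_put() like the one above; the final put runs
	 * dlm_mle_release(), which detaches the heartbeat events and frees
	 * the mle back to dlm_mle_cache.
	 */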
			 unsigned int namelen)
	__dlm_mle_attach_hb_events(dlm, mle);
	bucket = dlm_master_hash(dlm, mle->mnamehash);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			char *name, unsigned int namelen)
	bucket = dlm_master_hash(dlm, hash);
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
		dlm_mle_node_up(dlm, mle, NULL, idx);
		dlm_mle_node_down(dlm, mle, NULL, idx);
static void dlm_mle_node_down(struct dlm_ctxt *dlm,
		mlog(0, "node %u already removed from nodemap!\n", idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
		mlog(0, "node %u already in node map!\n", idx);
	if (dlm_mle_cache == NULL)
static void dlm_mle_release(struct kref *kref)
	__dlm_mle_detach_hb_events(dlm, mle);
	if (!dlm_lockres_cache)
	if (!dlm_lockname_cache)
	if (dlm_lockname_cache)
	if (dlm_lockres_cache)
static void dlm_lockres_release(struct kref *kref)
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->purge)) {
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');
	kref_put(&res->refs, dlm_lockres_release);
static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     const char *name, unsigned int namelen)
	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->purge);
	kref_init(&res->refs);
			 unsigned int namelen)
	res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	dlm_init_lockres(dlm, res, name, namelen);
	mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));
	mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
	     res->lockname.name, bit, __builtin_return_address(0));
	mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
	     __builtin_return_address(0));
	mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
	     __builtin_return_address(0));
	int bit, wait_on_recovery = 0;
	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
		__dlm_wait_on_lockres(tmpres);
	mlog(0, "allocating a new resource\n");
	mlog(0, "no lockres found, allocated our own: %p\n", res);
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
		mlog(0, "%s:%.*s: late on %s\n",
		     dlm->name, namelen, lockid,
		     mig ? "MIGRATION" : "BLOCK");
		dlm_mle_detach_hb_events(dlm, mle);
		mlog(0, "%s: res %.*s, At least one node (%d) "
		     "to recover before lock mastery can begin\n",
		     dlm->name, namelen, (char *)lockid, bit);
		wait_on_recovery = 1;
	spin_lock(&res->spinlock);
	spin_unlock(&res->spinlock);
	dlm_get_mle_inuse(mle);
	while (wait_on_recovery) {
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(0, "%s: Recovery map is not empty, but must "
			     "master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			mlog(0, "%s: waiting 500ms for heartbeat state "
			     "change\n", dlm->name);
			mlog(0, "%s: res %.*s, At least one node (%d) "
			     "to recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
			wait_on_recovery = 0;
		if (wait_on_recovery)
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
			if (mle->master <= nodenum)
			mlog(0, "%s: res %.*s, Requests only up to %u but "
			     "master is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
		wait_on_recovery = 1;
		mlog(0, "%s: res %.*s, Node map changed, redo the master "
		     "request now, blocked=%d\n", dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		     "dlm_wait_for_lock_mastery, blocked = %d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
	mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
	     res->lockname.name, res->owner);
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	spin_lock(&res->spinlock);
	spin_unlock(&res->spinlock);
#define DLM_MASTERY_TIMEOUT_MS   5000
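/*
 * Illustrative sketch (assumed usage, not copied verbatim from this file):
 * the mastery timeout is converted to jiffies and bounds the wait on the
 * mle wait queue while master-request votes are collected, roughly:
 *
 *	timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
 *	(void)wait_event_timeout(mle->wq,
 *				 atomic_read(&mle->woken) == 1, timeo);
 *
 * dlm_wait_for_lock_mastery() then re-checks the vote/node maps and either
 * asserts mastery here or restarts the master request.
 */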
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
	int map_changed, voting_done;
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		ret = dlm_do_master_request(res, mle, res->owner);
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		mlog(0, "map not changed and voting not done "
	if (voting_done && !*blocked) {
		mlog(0, "%s:%.*s: waiting again\n", dlm->name,
	mlog(0, "done waiting, master is %u\n", res->owner);
		mlog(0, "about to master %.*s here, this=%u\n",
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
	dlm_change_lockres_owner(dlm, res, m);
				 unsigned long *orig_bm,
				 unsigned long *cur_bm)
	unsigned long p1, p2;
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
			mlog(0, "sending request to new node\n");
			if (node == lowest) {
				mlog(0, "expected master %u died"
				     " while this node was blocked "
				     "waiting on it!\n", node);
					mlog(0, "%s:%.*s:still "
					     "blocked. waiting on %u "
					mlog(0, "%s:%.*s: no "
					     "longer blocking. try to "
					     "master this here\n",
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
				 sizeof(request), to, &response);
	if (ret == -ESRCH) {
	} else if (ret == -EINVAL) {
	} else if (ret == -ENOMEM) {
		     "network message! retrying\n");
		mlog(0, "node %u is the master, response=YES\n", to);
		mlog(0, "%s:%.*s: master node %u now knows I have a "
		mlog(0, "node %u not master, response=NO\n", to);
		mlog(0, "node %u not master, response=MAYBE\n", to);
		mlog(0, "node %u hit an error, resending\n", to);
	unsigned int namelen, hash;
	int dispatch_assert = 0;
	name = request->name;
		mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
		     "being recovered/migrated\n");
			dispatch_assert = 1;
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
				mlog(0, "node %u is master, but trying to migrate to "
				     "node is trying to migrate it to %u?!\n",
				dispatch_assert = 1;
		dlm_put_mle(tmpmle);
	found = dlm_find_mle(dlm, &tmpmle, name, namelen);
			mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
			mlog(0, "migration mle was found (%u->%u)\n",
		dlm_put_mle(tmpmle);
	if (dispatch_assert) {
			mlog(ML_ERROR, "bad lockres while trying to assert!\n");
		mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
			mlog(ML_ERROR, "failed to dispatch assert master work\n");
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
	const char *lockname = res->lockname.name;
	unsigned int namelen = res->lockname.len;
	dlm_node_iter_init(nodemap, &iter);
	while ((to = dlm_node_iter_next(&iter)) >= 0) {
		mlog(0, "sending assert master to %d (%.*s)\n", to,
			mlog(ML_ERROR, "Error %d when sending message %u (key "
			     "0x%x) to node %u\n", tmpret,
				mlog(0, "link to %d went down!\n", to);
			     "got %d.\n", namelen, lockname, to, r);
			if (dlm_find_mle(dlm, &mle, (char *)lockname,
				     "master MLE but no lockres on %u\n",
				     namelen, lockname, to);
			mlog(0, "%.*s: node %u create mles on other "
			     "nodes and requests a re-assert\n",
			     namelen, lockname, to);
		if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
			mlog(0, "%.*s: node %u has a reference to this "
			     "lockres, set the bit in the refmap\n",
			     namelen, lockname, to);
	unsigned int namelen, hash;
	int master_request = 0, have_lockres_ref = 0;
	name = assert->name;
	mlog(0, "assert_master with flags: %u\n", flags);
	if (!dlm_find_mle(dlm, &mle, name, namelen)) {
		mlog(0, "just got an assert_master from %u, but no "
		     "MLE for it! (%.*s)\n", assert->node_idx,
			mlog(0, "no bits set in the maybe_map, but %u "
			     "is asserting! (%.*s)\n", assert->node_idx,
		} else if (bit != assert->node_idx) {
				mlog(0, "master %u was found, %u should "
				     "back off\n", assert->node_idx, bit);
				mlog(0, "%u is the lowest node, "
				     "%u is asserting. (%.*s) %u must "
				     "have begun after %u won.\n", bit,
				     assert->node_idx, namelen, name, bit,
				mlog(0, "%s:%.*s: got cleanup assert"
				     " from %u for migration\n",
				     dlm->name, namelen, name,
				mlog(0, "%s:%.*s: got unrelated assert"
				     " from %u for migration, ignoring\n",
				     dlm->name, namelen, name,
1823 "RECOVERING!\n", assert->
node_idx, namelen, name);
1830 "but current owner is %u! (%.*s)\n",
1840 mlog(0,
"owner %u re-asserting on "
1846 "node %u, but %u is the owner! "
1848 res->
owner, namelen, name);
1853 "with no owner should be "
1854 "in-progress! (%.*s)\n",
1863 "new master is %u, and old master "
1866 mle->
master, namelen, name);
			mlog(0, "finishing off migration of lockres %.*s, "
			dlm_change_lockres_owner(dlm, res, mle->new_master);
			dlm_change_lockres_owner(dlm, res, mle->master);
		have_lockres_ref = 1;
		if (mle->inuse > 0) {
			if (extra_ref && rr < 3)
			else if (!extra_ref && rr < 2)
			if (extra_ref && rr < 2)
			else if (!extra_ref && rr < 1)
			     "that will mess up this node, refs=%d, extra=%d, "
			     "inuse=%d\n", dlm->name, namelen, name,
		__dlm_mle_detach_hb_events(dlm, mle);
		mlog(0, "assert_master from %u, but current "
		     "owner is %u (%.*s), no mle\n", assert->node_idx,
		     res->owner, namelen, name);
	*ret_data = (void *)res;
	if (master_request) {
		mlog(0, "need to tell master to reassert\n");
		if (!have_lockres_ref) {
			     "mle present here for %s:%.*s, but no lockres!\n",
	if (have_lockres_ref) {
		mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
	mlog(ML_ERROR, "Bad message received from another node. Dumping state "
	     "and killing the other node now! This node is OK and can continue.\n");
	*ret_data = (void *)res;
				   int ignore_higher, u8 request_from, u32 flags)
	item = kzalloc(sizeof(*item), GFP_NOFS);
	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
	item->u.am.lockres = res;
	item->u.am.ignore_higher = ignore_higher;
	item->u.am.request_from = request_from;
	res = item->u.am.lockres;
	ignore_higher = item->u.am.ignore_higher;
	request_from = item->u.am.request_from;
	flags = item->u.am.flags;
	if (ignore_higher) {
		mlog(0, "Someone asked us to assert mastery, but we're "
		     "in the middle of migration. Skipping assert, "
		     "the new master will handle that.\n");
	mlog(0, "worker about to master %.*s here, this=%u\n",
	ret = dlm_do_assert_master(dlm, res, nodemap, flags);
	mlog(0, "finished with dlm_assert_master_worker\n");
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
			     "node %u go down yet, and thinks the "
			     "dead node is mastering the recovery "
			     "lock. must wait.\n", dlm->name,
		mlog(0, "%s: reco lock master is %u\n", dlm->name,
	const char *lockname;
	memset(&deref, 0, sizeof(deref));
				 &deref, sizeof(deref), res->owner, &r);
		mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
		     dlm->name, namelen, lockname, ret, res->owner);
		mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
		     dlm->name, namelen, lockname, res->owner, r);
	unsigned int namelen;
	     dlm->name, namelen, name);
	     "but it is already dropped!\n", dlm->name,
	item = kzalloc(sizeof(*item), GFP_NOFS);
	dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
	item->u.dl.deref_res = res;
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
	res = item->u.dl.deref_res;
	node = item->u.dl.deref_node;
		mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
		     "but it is already dropped!\n", dlm->name,
static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
	int nonlocal = 0, node_ref;
		queue = dlm_list_idx_to_ptr(res, idx);
			mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
			     dlm_get_lock_cookie_node(cookie),
			     dlm_get_lock_cookie_seq(cookie),
			     dlm_list_in_text(idx));
static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
	mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
		mlog(0, "another process is already migrating it\n");
	if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
		     "the target went down.\n", res->lockname.len,
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
		dlm_mle_detach_hb_events(dlm, mle);
	dlm_get_mle_inuse(mle);
		mlog(0, "migration to node %u failed with %d\n",
		dlm_mle_detach_hb_events(dlm, mle);
		dlm_put_mle_inuse(mle);
		    res->owner == target)
			mlog(0, "%s:%.*s: timed out during migration\n",
				mlog(0, "%s:%.*s: expected migration "
				     "target %u is no longer up, restarting\n",
				dlm_mle_detach_hb_events(dlm, mle);
				dlm_put_mle_inuse(mle);
			mlog(0, "%s:%.*s: caught signal during migration\n",
	dlm_set_lockres_owner(dlm, res, target);
	dlm_remove_nonlocal_locks(dlm, res);
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle_inuse(mle);
	mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
#define DLM_MIGRATION_RETRY_MS  100
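/*
 * Illustrative sketch (assumed usage, not copied verbatim from this file):
 * when a migration attempt fails with a retryable error, the caller backs
 * off for this interval before trying again, e.g.:
 *
 *	msleep(DLM_MIGRATION_RETRY_MS);
 *
 * before re-invoking the migrate path for the same lockres.
 */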
	int lock_dropped = 0;
	if (dlm_is_lockres_migrateable(dlm, res))
		target = dlm_pick_migration_target(dlm, res);
	ret = dlm_migrate_lockres(dlm, res, target);
		mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
	return lock_dropped;
static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
	mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
	mlog(0, "about to wait on migration_wq, dirty=%s\n",
			dlm_migration_can_proceed(dlm, res, target),
		mlog(0, "woken again: migrating? %s, dead? %s\n",
		mlog(0, "all is well: migrating? %s, dead? %s\n",
	if (!dlm_migration_can_proceed(dlm, res, target)) {
		mlog(0, "trying again...\n");
			mlog(ML_ERROR, "aha. migration target %u just went down\n",
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
	for (i=0; i<3; i++) {
				mlog(0, "putting lock for node %u\n",
				list_del_init(&lock->list);
		mlog(0, "%s:%.*s: node %u had a ref to this "
		     "migrating lockres, clearing\n", dlm->name,
static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
		queue = dlm_list_idx_to_ptr(res, idx);
			nodenum = lock->ml.node;
static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
				  u8 master, u8 new_master,
	memset(&migrate, 0, sizeof(migrate));
	migrate.namelen = res->lockname.len;
	while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
		if (nodenum == master ||
		    nodenum == new_master)
					 &migrate, sizeof(migrate), nodenum,
			     "MIGRATE_REQUEST to node %u\n", dlm->name,
			     migrate.namelen, migrate.name, ret, nodenum);
		} else if (status < 0) {
			mlog(0, "migrate request (node %u) returned %d!\n",
			mlog(0, "%s:%.*s: need ref for node %u\n",
	mlog(0, "returning ret=%d\n", ret);
	unsigned int namelen, hash;
	name = migrate->name;
		     "lockres is marked as recovering!");
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
			mlog(0, "tried to migrate %.*s, but some "
			     "process beat me to it\n",
			     "master=%u new_master=%u // request: "
			     "master=%u new_master=%u // "
			__dlm_mle_detach_hb_events(dlm, tmp);
			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
			     "telling master to get ref for cleared out mle "
			     "during migration\n", dlm->name, namelen, name,
			     master, new_master);
	__dlm_mle_detach_hb_events(dlm, mle);
static void dlm_clean_migration_mle(struct dlm_ctxt *dlm,
	__dlm_mle_detach_hb_events(dlm, mle);
static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
	if (bit != dead_node) {
		mlog(0, "mle found, but dead node %u would not have been "
		     "master\n", dead_node);
		mlog(0, "node %u was expected master\n", dead_node);
		__dlm_mle_detach_hb_events(dlm, mle);
	mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node);
		bucket = dlm_master_hash(dlm, i);
				dlm_clean_block_mle(dlm, mle, dead_node);
			if (mle->master != dead_node &&
				dlm_clean_migration_mle(dlm, mle);
				mlog(0, "%s: node %u died during migration from "
				     "%u to %u!\n", dlm->name, dead_node, mle->master,
			res = dlm_reset_mleres_owner(dlm, mle);
	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
	mlog(0, "doing assert master of %.*s to all except the original node\n",
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
	mlog(0, "doing assert master of %.*s back to %u\n",
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
		mlog(0, "assert master to original master failed "
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
		bucket = dlm_master_hash(dlm, i);
	__dlm_mle_detach_hb_events(dlm, mle);