#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/socket.h>
#define MLOG_MASK_PREFIX ML_DLM
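/* next cookie this node will hand out; the low 56 bits are a counter
 * guarded by dlm_cookie_lock (see dlm_get_next_cookie() below) */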
static u64 dlm_next_cookie = 1;
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);
        if (dlm_lock_cache == NULL)
                return -ENOMEM;
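/* dlm_can_grant_new_lock(): a new request can be granted immediately
 * only if its mode is compatible with every already-granted lock and
 * with every pending convert, checking both the converter's currently
 * held mode and the mode it is converting to */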
                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;

                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;
                if (!dlm_lock_compatible(tmplock->ml.convert_type,
                                         lock->ml.type))
                        return 0;
        int call_ast = 0, kick_thread = 0;

        mlog(0, "type=%d\n", lock->ml.type);

        status = __dlm_lockres_state_to_status(res);

        __dlm_wait_on_lockres(res);

        if (dlm_can_grant_new_lock(res, lock)) {
                mlog(0, "I can grant this lock right away\n");
                if (!dlm_is_recovery_lock(res->lockname.name,
                                          res->lockname.len)) {
                        kick_thread = 1;
                        call_ast = 1;
                } else {
                        mlog(0, "%s: returning DLM_NORMAL to "
                             "node %u for reco lock\n", dlm->name,
                             lock->ml.node);
                }
                        if (dlm_is_recovery_lock(res->lockname.name,
                                                 res->lockname.len)) {
                                mlog(0, "%s: returning NOTQUEUED to "
                                     "node %u for reco lock\n", dlm->name,
                                     lock->ml.node);
                        }
        list_del_init(&lock->list);
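/* dlmlock_remote(): the resource is mastered on another node, so the
 * lock is queued locally and a create-lock request is sent to the
 * owner via dlm_send_remote_lock_request() below */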
        int lockres_changed = 1;

        mlog(0, "type=%d, lockres %.*s, flags = 0x%x\n",
             lock->ml.type, res->lockname.len,
             res->lockname.name, flags);

        __dlm_wait_on_lockres(res);
        status = dlm_send_remote_lock_request(dlm, res, lock, flags);
                if (status == DLM_RECOVERING &&
                    dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        mlog(0, "%s: recovery lock was owned by "
                             "dead node %u, remaster it now.\n",
                             dlm->name, res->owner);
        } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                mlog(0, "%s: $RECOVERY lock for this node (%u) is "
                     "mastered by %u; got lock, manually granting (no ast)\n",
                     dlm->name, dlm->node_num, res->owner);
        int tmpret, status = 0;
        create.requested_type = lock->ml.type;
327 "owned by node %u. That node is coming back up "
                mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to "
                     "node %u\n", dlm->name, create.namelen, create.name,
                     tmpret, res->owner);
                        ret = dlm_err_to_dlm_status(tmpret);
        kref_put(&lock->lock_refs, dlm_lock_release);
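/* kref release function: runs once the last reference to the lock is
 * dropped; detaches the lock from its lockres and frees it, along with
 * the lksb if that was allocated in-kernel rather than by the caller */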
static void dlm_lock_release(struct kref *kref)
        dlm_lock_detach_lockres(lock);
                mlog(0, "freeing kernel-allocated lksb\n");
        dlm_lockres_get(res);
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
                mlog(0, "removing lock's lockres reference\n");
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie)
        INIT_LIST_HEAD(&newlock->list);
        newlock->ml.pad1 = 0;
        newlock->ml.list = 0;
        newlock->ml.flags = 0;
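/* dlm_new_lock(): allocate a lock from dlm_lock_cache; if the caller
 * did not pass in an lksb, allocate one here and flag it so that
 * dlm_lock_release() knows to free it */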
        int kernel_allocated = 0;
        lock = kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
                lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
                kernel_allocated = 1;
        dlm_init_lock(lock, type, node, cookie);
        if (kernel_allocated)
                lock->lksb_kernel_allocated = 1;
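/* handler side, on the master node: reject create_lock messages that
 * arrive before this node has fully joined the domain */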
                mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
                     "sending a create_lock message for lock %.*s!\n",
                     dlm->name, create->node_idx, create->namelen, name);
        lksb = newlock->lksb;
                mlog(0, "set DLM_LKSB_GET_LVB flag\n");
        status = __dlm_lockres_state_to_status(res);
                mlog(0, "lockres recovering/migrating/in-progress\n");
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
        u64 tmpnode = node_num;

        /* shift the single byte of node num into the top 8 bits */
        tmpnode <<= 56;

        spin_lock(&dlm_cookie_lock);
        *cookie = (dlm_next_cookie | tmpnode);
        if (++dlm_next_cookie & 0xff00000000000000ull) {
                mlog(0, "This node's cookie will now wrap!\n");
                dlm_next_cookie = 1;
        }
        spin_unlock(&dlm_cookie_lock);
}
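/* dlmlock() argument checking: LKM_RECOVERY is valid only on the
 * $RECOVERY lock itself, and never in combination with a convert */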
        if (recovery &&
            (!dlm_is_recovery_lock(name, namelen) || convert)) {
                dlm_lockres_get(res);
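                /* the ast/bast/astdata/lksb are fixed at the original lock
                 * call; a convert request must pass exactly the same values
                 * or it is rejected with DLM_BADARGS */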
                if (lock->lksb != lksb || lock->ast != ast ||
                    lock->bast != bast || lock->astdata != data) {
                        status = DLM_BADARGS;
                        mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lksb, ast, bast, data);
                        mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lock->lksb, lock->ast,
                             lock->bast, lock->astdata);
                        goto error;
                }
                        mlog(0, "retrying convert with migration/recovery/"
                             "in-progress\n");
                dlm_get_next_cookie(dlm->node_num, &tmpcookie);
                mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
                mlog(0, "creating lock: lock=%p res=%p\n", lock, res);
                        mlog(0, "LKM_VALBLK passed by caller\n");
                        if (mode < LKM_PRMODE)
                                flags &= ~LKM_VALBLK;
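                /* dispatch on mastery: grant or queue locally if this node
                 * owns the resource, otherwise forward the request to the
                 * remote master */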
                if (res->owner == dlm->node_num)
                        status = dlmlock_master(dlm, res, lock, flags);
                else
                        status = dlmlock_remote(dlm, res, lock, flags);
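        /* error path: a lock created in this call (not a convert) still
         * holds the reference taken at allocation, so drop it here */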
        if (lock && !convert)
                dlm_lock_put(lock);