30 #include <linux/signal.h>
32 #include <linux/module.h>
34 #include <linux/types.h>
41 #define MLOG_MASK_PREFIX ML_DLMFS
/*
 * NOTE(review): fragment — only the signature and a spin_lock/spin_unlock
 * pair on lockres->l_lock are visible; the flag test performed under the
 * lock (original lines 51-54, 56), the second parameter, and the return
 * statement are missing from this excerpt. Confirm against full source.
 */
50 static inline int user_check_wait_flag(
struct user_lock_res *lockres,
55 spin_lock(&lockres->
l_lock);
57 spin_unlock(&lockres->
l_lock);
/* NOTE(review): signature fragment — wait body not visible in this excerpt. */
62 static inline void user_wait_on_busy_lock(
struct user_lock_res *lockres)
/* NOTE(review): signature fragment — wait body not visible in this excerpt. */
69 static inline void user_wait_on_blocked_lock(
struct user_lock_res *lockres)
/*
 * NOTE(review): fragment — presumably derives the cluster connection that
 * owns this lockres; return type line and body are outside this excerpt.
 */
78 cluster_connection_from_user_lockres(
struct user_lock_res *lockres)
/*
 * NOTE(review): fragment — presumably maps a lockres back to its owning
 * inode; return type line and body are outside this excerpt.
 */
89 user_dlm_inode_from_user_lockres(
struct user_lock_res *lockres)
/*
 * NOTE(review): fragment — whatever per-lockres state is reset to recover
 * from a DLM error is done under lockres->l_lock; the actual state
 * manipulation (original line 102) is not visible in this excerpt.
 */
99 static inline void user_recover_from_dlm_error(
struct user_lock_res *lockres)
101 spin_lock(&lockres->
l_lock);
103 spin_unlock(&lockres->
l_lock);
/*
 * Log a DLM error: failing call site, status code, and the lock name
 * printed via the "%.*s" precision pair (l_namelen/l_name), so a
 * non-NUL-terminated name is handled safely.
 * NOTE(review): the closing "} while (0)" of this macro (original line
 * 110) is outside this excerpt.
 */
106 #define user_log_dlm_error(_func, _stat, _lockres) do { \
107 mlog(ML_ERROR, "Dlm error %d while calling %s on " \
108 "resource %.*s\n", _stat, _func, \
109 _lockres->l_namelen, _lockres->l_name); \
/*
 * NOTE(review): signature fragment — the mapping from a blocking level to
 * the highest compatible holder level is not visible in this excerpt.
 */
115 static inline int user_highest_compat_lock_level(
int level)
/*
 * NOTE(review): lock-AST fragment — the enclosing function's signature is
 * outside this excerpt. Visible pieces, bracketed by lockres->l_lock: an
 * ML_BASTS trace of a level transition ("%d => %d"), an lksb status value
 * logged at ML_ERROR, an "requested ivmode" message, and a comparison
 * against user_highest_compat_lock_level(lockres->l_blocking). All
 * intervening decision lines are missing — confirm against full source.
 */
131 mlog(
ML_BASTS,
"AST fired for lockres %.*s, level %d => %d\n",
135 spin_lock(&lockres->
l_lock);
139 mlog(
ML_ERROR,
"lksb status value of %u on lockres %.*s\n",
141 spin_unlock(&lockres->
l_lock);
146 "Lockres %.*s, requested ivmode. flags 0x%x\n",
152 user_highest_compat_lock_level(lockres->
l_blocking)) {
163 spin_unlock(&lockres->
l_lock);
/*
 * NOTE(review): fragment — resolves the lockres to its inode via
 * user_dlm_inode_from_user_lockres(); the reference-taking call itself
 * (original lines after 171) is not visible in this excerpt.
 */
168 static inline void user_dlm_grab_inode_ref(
struct user_lock_res *lockres)
171 inode = user_dlm_inode_from_user_lockres(lockres);
/*
 * NOTE(review): fragment — the only visible body line pins an inode
 * reference before queueing; the queueing call itself (original lines
 * after 181) is missing from this excerpt.
 */
178 static void __user_dlm_queue_lockres(
struct user_lock_res *lockres)
181 user_dlm_grab_inode_ref(lockres);
/*
 * NOTE(review): conditional variant — the guard conditions (original
 * lines 191-210) are missing; only the fall-through call to
 * __user_dlm_queue_lockres() is visible.
 */
190 static void __user_dlm_cond_queue_lockres(
struct user_lock_res *lockres)
211 __user_dlm_queue_lockres(lockres);
/*
 * NOTE(review): blocking-AST fragment — the enclosing function's
 * signature is outside this excerpt. Visible: an ML_BASTS trace, then
 * under lockres->l_lock the lockres is queued for processing via
 * __user_dlm_queue_lockres(). The lines recording the blocking level
 * (original 219-225) are missing — confirm against full source.
 */
218 mlog(
ML_BASTS,
"BAST fired for lockres %.*s, blocking %d, level %d\n",
221 spin_lock(&lockres->
l_lock);
226 __user_dlm_queue_lockres(lockres);
227 spin_unlock(&lockres->
l_lock);
/*
 * NOTE(review): unlock-AST fragment. Visible: the (lksb, status)
 * signature, an ML_BASTS trace, and — under lockres->l_lock — a re-queue
 * via __user_dlm_queue_lockres() on at least one path. Most of the body
 * (original lines 243-265, 267-270) is missing from this excerpt, so the
 * conditions leading to the re-queue cannot be stated from here.
 */
232 static void user_unlock_ast(
struct ocfs2_dlm_lksb *lksb,
int status)
236 mlog(
ML_BASTS,
"UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
242 spin_lock(&lockres->
l_lock);
266 __user_dlm_queue_lockres(lockres);
271 spin_unlock(&lockres->
l_lock);
/*
 * NOTE(review): fragment of a locking-protocol callback table wiring the
 * lock/blocking/unlock AST handlers; the struct's declaration line is
 * outside this excerpt.
 */
286 .lp_lock_ast = user_ast,
287 .lp_blocking_ast = user_bast,
288 .lp_unlock_ast = user_unlock_ast,
/*
 * NOTE(review): fragment — resolves the lockres to its inode; the
 * reference-dropping call itself (original lines after 294) is not
 * visible here. Presumably pairs with user_dlm_grab_inode_ref — confirm.
 */
291 static inline void user_dlm_drop_inode_ref(
struct user_lock_res *lockres)
294 inode = user_dlm_inode_from_user_lockres(lockres);
/*
 * NOTE(review): large fragment (original lines 304-395) — presumably the
 * worker that processes a queued/blocked lockres. Visible: it fetches
 * the cluster connection, drops l_lock on many early-exit paths, computes
 * a downconvert target via user_highest_compat_lock_level(l_blocking),
 * calls user_recover_from_dlm_error() on some failure path, and finally
 * drops the inode reference taken when the lockres was queued. Nearly
 * every decision line between the spin_unlock() calls is missing —
 * confirm any behavioral claim against the full source.
 */
304 cluster_connection_from_user_lockres(lockres);
308 spin_lock(&lockres->
l_lock);
311 "Lockres %.*s, flags 0x%x\n",
326 spin_unlock(&lockres->
l_lock);
333 spin_unlock(&lockres->
l_lock);
341 spin_unlock(&lockres->
l_lock);
346 spin_unlock(&lockres->
l_lock);
360 spin_unlock(&lockres->
l_lock);
369 spin_unlock(&lockres->
l_lock);
377 new_level = user_highest_compat_lock_level(lockres->
l_blocking);
382 spin_unlock(&lockres->
l_lock);
391 user_recover_from_dlm_error(lockres);
395 user_dlm_drop_inode_ref(lockres);
/* NOTE(review): fragment — holder-count increment; body not visible here. */
398 static inline void user_dlm_inc_holders(
struct user_lock_res *lockres,
/*
 * NOTE(review): fragment — a waiter on a blocked lock may proceed iff the
 * wanted level is no stronger than the highest level compatible with
 * l_blocking. The signature/return-type lines are outside this excerpt.
 */
417 user_may_continue_on_blocked_lock(
struct user_lock_res *lockres,
422 return wanted <= user_highest_compat_lock_level(lockres->
l_blocking);
/*
 * NOTE(review): fragment (original lines 431-505) — presumably the
 * cluster-lock acquisition path. Visible: trace, then under l_lock it
 * waits out busy locks (user_wait_on_busy_lock) and blocked locks
 * incompatible with the requested level
 * (user_may_continue_on_blocked_lock / user_wait_on_blocked_lock),
 * issues a DLM request when an upconvert is needed (level > l_level,
 * passing local_flags and l_name), recovers on error, waits for the
 * request to complete, and on success bumps the holder count. The
 * retry/goto structure and the actual DLM call are missing from this
 * excerpt — verify against the full source.
 */
431 cluster_connection_from_user_lockres(lockres);
441 mlog(
ML_BASTS,
"lockres %.*s, level %d, flags = 0x%x\n",
450 spin_lock(&lockres->
l_lock);
459 spin_unlock(&lockres->
l_lock);
461 user_wait_on_busy_lock(lockres);
466 (!user_may_continue_on_blocked_lock(lockres, level))) {
469 spin_unlock(&lockres->
l_lock);
471 user_wait_on_blocked_lock(lockres);
475 if (level > lockres->
l_level) {
482 spin_unlock(&lockres->
l_lock);
489 local_flags, lockres->
l_name,
496 user_recover_from_dlm_error(lockres);
500 user_wait_on_busy_lock(lockres);
504 user_dlm_inc_holders(lockres, level);
505 spin_unlock(&lockres->
l_lock);
/* NOTE(review): fragment — holder-count decrement; body not visible here. */
512 static inline void user_dlm_dec_holders(
struct user_lock_res *lockres,
/*
 * NOTE(review): unlock-path fragment — under l_lock: drop a holder, then
 * conditionally queue the lockres for later (downconvert) processing.
 * The enclosing function's signature (original lines 515-538) is missing.
 */
539 spin_lock(&lockres->
l_lock);
540 user_dlm_dec_holders(lockres, level);
541 __user_dlm_cond_queue_lockres(lockres);
542 spin_unlock(&lockres->
l_lock);
/*
 * NOTE(review): two l_lock-bracketed fragments (original lines 554-560
 * and 573-582) whose payload lines are missing — presumably the LVB
 * write and read accessors — followed by a full zero-initialisation of a
 * lockres (memset at original line 589, part of a missing init routine).
 * Confirm against the full source.
 */
554 spin_lock(&lockres->
l_lock);
560 spin_unlock(&lockres->
l_lock);
573 spin_lock(&lockres->
l_lock);
582 spin_unlock(&lockres->
l_lock);
589 memset(lockres, 0,
sizeof(*lockres));
/*
 * NOTE(review): teardown fragment (original lines 610-651) — presumably
 * lock destruction. Visible: fetch the cluster connection, then under
 * l_lock take several early-exit unlock paths, and wait twice for the
 * lock to become un-busy (user_wait_on_busy_lock) — once before and once
 * after a missing DLM unlock call. Nearly all decision lines are absent
 * from this excerpt; verify against the full source.
 */
610 cluster_connection_from_user_lockres(lockres);
614 spin_lock(&lockres->
l_lock);
616 spin_unlock(&lockres->
l_lock);
623 spin_unlock(&lockres->
l_lock);
625 user_wait_on_busy_lock(lockres);
627 spin_lock(&lockres->
l_lock);
631 spin_unlock(&lockres->
l_lock);
637 spin_unlock(&lockres->
l_lock);
643 spin_unlock(&lockres->
l_lock);
651 user_wait_on_busy_lock(lockres);
/*
 * NOTE(review): no-op recovery callback; parameter list is truncated and
 * the (presumably empty) body is not visible in this excerpt.
 */
658 static void user_dlm_recovery_handler_noop(
int node_num,
/*
 * NOTE(review): registration-path fragment — the noop handler is passed
 * to a registration call that is missing here; on a nonzero rc the error
 * is wrapped with ERR_PTR(), otherwise the connection is returned.
 */
677 user_dlm_recovery_handler_noop,
682 return rc ? ERR_PTR(rc) : conn;