26 #include <linux/types.h>
27 #include <linux/slab.h>
34 #include <linux/time.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
66 #ifdef CONFIG_OCFS2_FS_STATS
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
233 #define LOCK_TYPE_USES_LVB 0x2
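/* Lock types flagged LOCK_TYPE_REQUIRES_REFRESH must re-read their protected
 * on-disk state after the cluster lock is acquired; LOCK_TYPE_USES_LVB means
 * the type carries metadata in the DLM lock value block (and in mainline
 * ocfs2 such types also set LOCK_TYPE_REQUIRES_REFRESH). */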
236 .get_osb = ocfs2_get_inode_osb,
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
272 .get_osb = ocfs2_get_inode_osb,
277 .get_osb = ocfs2_get_file_osb,
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
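/* Each ocfs2_lock_res_ops table above wires per-lock-type callbacks
 * (get_osb, check_downconvert, set_lvb, downconvert_worker, post_unlock)
 * into the generic cluster-lock and downconvert machinery below. */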
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
307 BUG_ON(!ocfs2_is_inode_lock(lockres));
334 if (lockres->l_ops->get_osb)
335 return lockres->l_ops->get_osb(lockres);
340 static int ocfs2_lock_create(struct ocfs2_super *osb,
344 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
346 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
348 int level, unsigned long caller_ip);
349 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
353 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
356 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
360 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
362 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
364 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
365 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
366 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
367 _err, _func, _lockres->l_name); \
369 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
370 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
371 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
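/* Dentry lock names embed the parent inode number as raw bytes rather than
 * printable text, so this macro prints only the leading printable part of
 * l_name and appends the inode number separately. */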
373 static int ocfs2_downconvert_thread(void *arg);
374 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
376 static int ocfs2_inode_lock_update(struct inode *inode,
377 struct buffer_head **bh);
378 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
379 static inline int ocfs2_highest_compat_lock_level(int level);
380 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
387 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
389 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
404 (long long)blkno, generation);
408 mlog(0, "built lock resource with name: %s\n", name);
416 mlog(0, "Add tracking for lockres %s\n", res->l_name);
418 spin_lock(&ocfs2_dlm_tracking_lock);
420 spin_unlock(&ocfs2_dlm_tracking_lock);
425 spin_lock(&ocfs2_dlm_tracking_lock);
428 spin_unlock(&ocfs2_dlm_tracking_lock);
431 #ifdef CONFIG_OCFS2_FS_STATS
434 res->l_lock_refresh = 0;
435 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
444 struct ocfs2_lock_stats *stats;
447 stats = &res->l_lock_prmode;
449 stats = &res->l_lock_exmode;
453 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
454 usec = ktime_to_us(kt);
457 stats->ls_total += ktime_to_ns(kt);
459 if (unlikely(stats->ls_gets == 0)) {
461 stats->ls_total = ktime_to_ns(kt);
464 if (stats->ls_max < usec)
465 stats->ls_max = usec;
471 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
473 lockres->l_lock_refresh++;
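/* Per-lockres statistics, kept separately for PR and EX requests: ls_gets
 * counts acquisitions, ls_fail failed attempts, ls_total accumulates wait
 * time in nanoseconds, and ls_max records the longest single wait in
 * microseconds, as computed in ocfs2_update_lock_stats() above. */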
481 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
484 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
488 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
496 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
516 ocfs2_init_lock_stats(res);
517 #ifdef CONFIG_DEBUG_LOCK_ALLOC
522 res->l_lockdep_map.key = NULL;
538 unsigned int generation,
545 ops = &ocfs2_inode_rw_lops;
548 ops = &ocfs2_inode_inode_lops;
551 ops = &ocfs2_inode_open_lops;
559 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
561 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
566 struct inode *inode = ocfs2_lock_res_inode(lockres);
589 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
606 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
637 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
651 &ocfs2_super_lops, osb);
654 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
662 &ocfs2_rename_lops, osb);
665 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
673 &ocfs2_nfs_sync_lops, osb);
676 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
682 &ocfs2_orphan_scan_lops, osb);
694 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
706 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
713 unsigned int generation)
717 generation, lockres->l_name);
719 &ocfs2_refcount_block_lops, osb);
719 &ocfs2_refcount_block_lops, osb);
727 ocfs2_remove_lockres_tracking(res);
730 "Lockres %s is on the blocked list\n",
733 "Lockres %s has mask waiters pending\n",
736 "Lockres %s is locked\n",
739 "Lockres %s has %u ro holders\n",
742 "Lockres %s has %u ex holders\n",
751 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
768 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
790 static inline int ocfs2_highest_compat_lock_level(int level)
802 unsigned long newflags)
819 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
821 lockres_set_flags(lockres, lockres->l_flags | or);
826 lockres_set_flags(lockres, lockres->l_flags & ~clear);
829 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
838 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
845 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
870 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
885 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
888 int needs_downconvert = 0;
897 if (ocfs2_highest_compat_lock_level(level) <
898 ocfs2_highest_compat_lock_level(lockres->l_blocking))
899 needs_downconvert = 1;
904 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
908 if (needs_downconvert)
910 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
911 return needs_downconvert;
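/* A blocking AST only records the highest blocking level here; the lockres
 * is queued to the downconvert thread (ocfs2_schedule_blocked_lock) when the
 * requested level is incompatible with the level currently held. */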
973 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
974 unsigned int generation,
1000 unsigned int generation,
1003 unsigned long flags;
1006 __lockres_clear_pending(lockres, generation, osb);
1007 spin_unlock_irqrestore(&lockres->l_lock, flags);
1010 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1020 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1023 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1024 int needs_downconvert;
1025 unsigned long flags;
1029 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1031 ocfs2_lock_type_string(lockres->l_type));
1041 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1042 if (needs_downconvert)
1043 ocfs2_schedule_blocked_lock(osb, lockres);
1044 spin_unlock_irqrestore(&lockres->l_lock, flags);
1054 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1055 unsigned long flags;
1068 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1069 lockres->l_name, status);
1070 spin_unlock_irqrestore(&lockres->l_lock, flags);
1074 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1080 ocfs2_generic_handle_attach_action(lockres);
1084 ocfs2_generic_handle_convert_action(lockres);
1087 ocfs2_generic_handle_downconvert_action(lockres);
1090 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1091 "flags 0x%lx, unlock: %u\n",
1111 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1114 spin_unlock_irqrestore(&lockres->l_lock, flags);
1120 unsigned long flags;
1122 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1128 "unlock_action %d\n", error, lockres->l_name,
1130 spin_unlock_irqrestore(&lockres->l_lock, flags);
1136 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1153 spin_unlock_irqrestore(&lockres->l_lock, flags);
1184 .lp_lock_ast = ocfs2_locking_ast,
1185 .lp_blocking_ast = ocfs2_blocking_ast,
1186 .lp_unlock_ast = ocfs2_unlock_ast,
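/* This callback table is the locking protocol handed to the cluster stack
 * glue; the underlying DLM invokes these for grant, blocking, and unlock
 * notifications on ocfs2's behalf. */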
1194 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1197 unsigned long flags;
1206 spin_unlock_irqrestore(&lockres->l_lock, flags);
1215 static int ocfs2_lock_create(struct ocfs2_super *osb,
1221 unsigned long flags;
1224 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1230 spin_unlock_irqrestore(&lockres->l_lock, flags);
1237 gen = lockres_set_pending(lockres);
1238 spin_unlock_irqrestore(&lockres->l_lock, flags);
1246 lockres_clear_pending(lockres, gen, osb);
1249 ocfs2_recover_from_dlm_error(lockres, 1);
1252 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1258 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1261 unsigned long flags;
1266 spin_unlock_irqrestore(&lockres->l_lock, flags);
1271 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1278 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1288 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1293 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1300 ocfs2_init_start_time(mw);
1311 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1327 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1330 unsigned long flags;
1334 if (!list_empty(&mw->mw_item)) {
1341 spin_unlock_irqrestore(&lockres->l_lock, flags);
1354 lockres_remove_mask_waiter(lockres, mw);
1362 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1368 unsigned long caller_ip)
1373 unsigned long flags;
1375 int noqueue_attempted = 0;
1377 ocfs2_init_mask_waiter(&mw);
1387 if (catch_signals && signal_pending(current)) {
1393 "Cluster lock called on freeing lockres %s! flags "
1423 if (level <= lockres->l_level)
1424 goto update_holders;
1428 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1436 if (level > lockres->l_level) {
1437 if (noqueue_attempted > 0) {
1442 noqueue_attempted = 1;
1458 gen = lockres_set_pending(lockres);
1459 spin_unlock_irqrestore(&lockres->l_lock, flags);
1474 lockres_clear_pending(lockres, gen, osb);
1476 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1481 ocfs2_recover_from_dlm_error(lockres, 1);
1485 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1498 ocfs2_inc_holders(lockres, level);
1504 spin_unlock_irqrestore(&lockres->l_lock, flags);
1517 if (lockres_remove_mask_waiter(lockres, &mw))
1523 ret = ocfs2_wait_for_mask(&mw);
1528 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1530 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1531 if (!ret && lockres->l_lockdep_map.key != NULL) {
1545 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1551 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1556 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1559 unsigned long caller_ip)
1561 unsigned long flags;
1564 ocfs2_dec_holders(lockres, level);
1565 ocfs2_downconvert_on_unlock(osb, lockres);
1566 spin_unlock_irqrestore(&lockres->l_lock, flags);
1567 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1568 if (lockres->l_lockdep_map.key != NULL)
1573 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1579 unsigned long flags;
1585 spin_unlock_irqrestore(&lockres->l_lock, flags);
1587 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1602 BUG_ON(!ocfs2_inode_is_new(inode));
1604 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1614 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1624 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1630 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
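/* For a freshly allocated inode no other node can hold these locks yet, so
 * the rw, inode, and open lock resources are created directly at the desired
 * level. The two trailing ints correspond, in mainline ocfs2, to the "ex"
 * and "local" arguments of ocfs2_create_new_lock(). */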
1648 mlog(0, "inode %llu take %s RW lock\n",
1649 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1650 write ? "EXMODE" : "PRMODE");
1652 if (ocfs2_mount_local(osb))
1655 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1659 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1673 mlog(0, "inode %llu drop %s RW lock\n",
1674 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1675 write ? "EXMODE" : "PRMODE");
1677 if (!ocfs2_mount_local(osb))
1678 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1692 mlog(0, "inode %llu take PRMODE open lock\n",
1693 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1695 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1698 lockres = &OCFS2_I(inode)->ip_open_lockres;
1700 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1711 int status = 0, level;
1717 mlog(0, "inode %llu try to take %s open lock\n",
1718 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1719 write ? "EXMODE" : "PRMODE");
1721 if (ocfs2_is_hard_readonly(osb)) {
1727 if (ocfs2_mount_local(osb))
1730 lockres = &OCFS2_I(inode)->ip_open_lockres;
1740 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1741 level, DLM_LKF_NOQUEUE, 0);
1752 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1755 mlog(0, "inode %llu drop open lock\n",
1756 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1758 if (ocfs2_mount_local(osb))
1772 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1776 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1777 unsigned long flags;
1780 ocfs2_init_mask_waiter(&mw);
1785 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1787 spin_unlock_irqrestore(&lockres->l_lock, flags);
1788 ret = ocfs2_cancel_convert(osb, lockres);
1796 spin_unlock_irqrestore(&lockres->l_lock, flags);
1798 ocfs2_wait_for_mask(&mw);
1807 if (lockres->l_level == level)
1810 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1813 spin_unlock_irqrestore(&lockres->l_lock, flags);
1843 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1844 unsigned long flags;
1850 ocfs2_init_mask_waiter(&mw);
1855 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1864 spin_unlock_irqrestore(&lockres->l_lock, flags);
1870 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1876 ret = ocfs2_wait_for_mask(&mw);
1890 spin_unlock_irqrestore(&lockres->l_lock, flags);
1895 if (!trylock || (ret != -EAGAIN)) {
1900 ocfs2_recover_from_dlm_error(lockres, 1);
1901 lockres_remove_mask_waiter(lockres, &mw);
1905 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1922 ret = ocfs2_flock_handle_signal(lockres, level);
1923 } else if (!ret && (level > lockres->l_level)) {
1931 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1932 lockres->l_name, ex, trylock, ret);
1940 unsigned long flags;
1946 ocfs2_init_mask_waiter(&mw);
1954 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1965 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1967 spin_unlock_irqrestore(&lockres->l_lock, flags);
1969 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
1975 ret = ocfs2_wait_for_mask(&mw);
1980 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2007 #define OCFS2_SEC_BITS 34
2008 #define OCFS2_SEC_SHIFT (64 - 34)
2009 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
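/* Timestamps are packed into a single u64 for the LVB: the seconds value
 * occupies the high OCFS2_SEC_BITS (34) bits and the nanoseconds the low
 * OCFS2_SEC_SHIFT (30) bits, i.e. packed = (sec << 30) | nsec. */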
2013 static u64 ocfs2_pack_timespec(struct timespec *spec)
2027 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2066 static void ocfs2_unpack_timespec(struct timespec *spec,
2073 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2096 inode->i_blocks = ocfs2_inode_sector_count(inode);
2102 ocfs2_unpack_timespec(&inode->i_atime,
2104 ocfs2_unpack_timespec(&inode->i_mtime,
2106 ocfs2_unpack_timespec(&inode->i_ctime,
2111 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2130 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2132 unsigned long flags;
2138 spin_unlock_irqrestore(&lockres->l_lock, flags);
2143 spin_unlock_irqrestore(&lockres->l_lock, flags);
2145 ocfs2_wait_on_refreshing_lock(lockres);
2151 spin_unlock_irqrestore(&lockres->l_lock, flags);
2155 mlog(0, "status %d\n", status);
2161 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2164 unsigned long flags;
2170 spin_unlock_irqrestore(&lockres->l_lock, flags);
2176 static int ocfs2_inode_lock_update(struct inode *inode,
2177 struct buffer_head **bh)
2185 if (ocfs2_mount_local(osb))
2190 mlog(0, "Orphaned inode %llu was deleted while we "
2191 "were waiting on a lock. ip_flags = 0x%x\n",
2199 if (!ocfs2_should_refresh_lock_res(lockres))
2208 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2209 mlog(0, "Trusting LVB on inode %llu\n",
2211 ocfs2_refresh_inode_from_lvb(inode);
2233 "Invalid dinode %llu disk generation: %u "
2234 "inode->i_generation: %u\n",
2240 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2246 ocfs2_track_lock_refresh(lockres);
2251 ocfs2_complete_lock_res_refresh(lockres, status);
2256 static int ocfs2_assign_bh(struct inode *inode,
2257 struct buffer_head **ret_bh,
2258 struct buffer_head *passed_bh)
2265 *ret_bh = passed_bh;
2283 struct buffer_head **ret_bh,
2292 struct buffer_head *local_bh = NULL;
2296 mlog(0, "inode %llu, take %s META lock\n",
2297 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2298 ex ? "EXMODE" : "PRMODE");
2304 if (ocfs2_is_hard_readonly(osb)) {
2310 if (ocfs2_mount_local(osb))
2316 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2322 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2337 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2350 ocfs2_complete_lock_res_refresh(lockres, 0);
2359 status = ocfs2_inode_lock_update(inode, &local_bh);
2367 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2376 if (ret_bh && (*ret_bh)) {
2413 struct buffer_head **ret_bh,
2447 struct buffer_head *bh = NULL;
2470 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2473 mlog(0, "inode %llu drop %s META lock\n",
2474 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2475 ex ? "EXMODE" : "PRMODE");
2478 !ocfs2_mount_local(osb))
2479 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
2488 if (ocfs2_is_hard_readonly(osb))
2491 if (ocfs2_mount_local(osb))
2495 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2514 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2530 if (ocfs2_is_hard_readonly(osb))
2533 if (ocfs2_mount_local(osb))
2536 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2546 status = ocfs2_should_refresh_lock_res(lockres);
2554 ocfs2_complete_lock_res_refresh(lockres, status);
2558 ocfs2_track_lock_refresh(lockres);
2570 if (!ocfs2_mount_local(osb))
2571 ocfs2_cluster_unlock(osb, lockres, level);
2579 if (ocfs2_is_hard_readonly(osb))
2582 if (ocfs2_mount_local(osb))
2585 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2596 if (!ocfs2_mount_local(osb))
2605 if (ocfs2_is_hard_readonly(osb))
2608 if (ocfs2_mount_local(osb))
2614 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2623 if (!ocfs2_mount_local(osb))
2624 ocfs2_cluster_unlock(osb, lockres,
2637 if (ocfs2_is_hard_readonly(osb)) {
2643 if (ocfs2_mount_local(osb))
2646 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2659 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2660 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2666 static void ocfs2_dlm_debug_free(struct kref *kref)
2678 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2721 mlog(0, "End of list found, %p\n", ret);
2736 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2741 spin_lock(&ocfs2_dlm_tracking_lock);
2742 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2754 spin_unlock(&ocfs2_dlm_tracking_lock);
2759 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2763 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2769 spin_lock(&ocfs2_dlm_tracking_lock);
2770 iter = ocfs2_dlm_next_res(iter, priv);
2777 spin_unlock(&ocfs2_dlm_tracking_lock);
2790 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2791 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2805 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2831 #ifdef CONFIG_OCFS2_FS_STATS
2832 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2833 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2834 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2835 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2836 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2837 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2838 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2839 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2840 # define lock_refresh(_l) ((_l)->l_lock_refresh)
2842 # define lock_num_prmode(_l) (0)
2843 # define lock_num_exmode(_l) (0)
2844 # define lock_num_prmode_failed(_l) (0)
2845 # define lock_num_exmode_failed(_l) (0)
2846 # define lock_total_prmode(_l) (0ULL)
2847 # define lock_total_exmode(_l) (0ULL)
2848 # define lock_max_prmode(_l) (0)
2849 # define lock_max_exmode(_l) (0)
2850 # define lock_refresh(_l) (0)
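/* Without CONFIG_OCFS2_FS_STATS these accessors compile to constant zeros,
 * so the locking_state output format stays the same either way. */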
2878 .start = ocfs2_dlm_seq_start,
2879 .stop = ocfs2_dlm_seq_stop,
2880 .next = ocfs2_dlm_seq_next,
2881 .show = ocfs2_dlm_seq_show,
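/* The seq_file iterator above walks the lockres tracking list under
 * ocfs2_dlm_tracking_lock to generate the debugfs locking_state dump,
 * one line per lock resource. */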
2884 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2890 ocfs2_remove_lockres_tracking(res);
2895 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2911 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2913 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2923 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2931 .open = ocfs2_dlm_debug_open,
2932 .release = ocfs2_dlm_debug_release,
2937 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2946 &ocfs2_dlm_debug_fops);
2950 "Unable to create locking state debugfs file.\n");
2954 ocfs2_get_dlm_debug(dlm_debug);
2959 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2974 if (ocfs2_mount_local(osb)) {
2979 status = ocfs2_dlm_init_debug(osb);
2988 status = PTR_ERR(osb->dc_task);
3009 "could not find this host's node number\n");
3018 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3025 ocfs2_dlm_shutdown_debug(osb);
3036 ocfs2_drop_osb_locks(osb);
3057 ocfs2_dlm_shutdown_debug(osb);
3060 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3064 unsigned long flags;
3077 "lockres %s, flags 0x%lx\n",
3081 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3082 "%u, unlock_action = %u\n",
3086 spin_unlock_irqrestore(&lockres->l_lock, flags);
3091 ocfs2_wait_on_busy_lock(lockres);
3100 lockres->l_ops->set_lvb(lockres);
3107 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3110 spin_unlock_irqrestore(&lockres->l_lock, flags);
3123 spin_unlock_irqrestore(&lockres->l_lock, flags);
3134 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3137 ocfs2_wait_on_busy_lock(lockres);
3152 unsigned long flags;
3154 ocfs2_init_mask_waiter(&mw);
3160 spin_unlock_irqrestore(&lockres->l_lock, flags);
3162 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3164 status = ocfs2_wait_for_mask(&mw);
3170 spin_unlock_irqrestore(&lockres->l_lock, flags);
3179 ret = ocfs2_drop_lock(osb, lockres);
3184 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3200 &OCFS2_I(inode)->ip_open_lockres);
3207 &OCFS2_I(inode)->ip_inode_lockres);
3210 if (err < 0 && !status)
3214 &OCFS2_I(inode)->ip_rw_lockres);
3217 if (err < 0 && !status)
3223 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3230 if (lockres->l_level <= new_level) {
3231 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3232 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3243 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3249 return lockres_set_pending(lockres);
3252 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3256 unsigned int generation)
3273 lockres_clear_pending(lockres, generation, osb);
3276 ocfs2_recover_from_dlm_error(lockres, 1);
3286 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3307 "lock %s, invalid flags: 0x%lx\n",
3315 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3324 ocfs2_recover_from_dlm_error(lockres, 0);
3332 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3336 unsigned long flags;
3352 spin_unlock_irqrestore(&lockres->l_lock, flags);
3388 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3389 spin_unlock_irqrestore(&lockres->l_lock, flags);
3391 ret = ocfs2_cancel_convert(osb, lockres);
3420 spin_unlock_irqrestore(&lockres->l_lock, flags);
3428 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3454 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3456 if (lockres->l_ops->check_downconvert
3457 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3466 if (!lockres->l_ops->downconvert_worker)
3475 spin_unlock_irqrestore(&lockres->l_lock, flags);
3489 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3490 "Recheck\n", lockres->l_name, blocking,
3509 lockres->l_ops->set_lvb(lockres);
3512 gen = ocfs2_prepare_downconvert(lockres, new_level);
3513 spin_unlock_irqrestore(&lockres->l_lock, flags);
3514 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3523 spin_unlock_irqrestore(&lockres->l_lock, flags);
3529 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3532 struct inode *inode;
3536 inode = ocfs2_lock_res_inode(lockres);
3540 oi = OCFS2_I(inode);
3559 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3560 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3582 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3594 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3597 struct inode *inode = ocfs2_lock_res_inode(lockres);
3599 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
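/* Metadata (and refcount) locks may only be downconverted once the journal
 * has checkpointed the cached changes; otherwise another node could read
 * stale blocks from disk after being granted the lock. */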
3604 struct inode *inode = ocfs2_lock_res_inode(lockres);
3606 __ocfs2_stuff_meta_lvb(inode);
3614 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3640 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3646 unsigned long flags;
3676 spin_lock(&dentry_attach_lock);
3682 spin_unlock(&dentry_attach_lock);
3683 spin_unlock_irqrestore(&lockres->l_lock, flags);
3685 mlog(0, "extra_ref = %d\n", extra_ref);
3695 spin_lock(&dentry_attach_lock);
3701 spin_unlock(&dentry_attach_lock);
3703 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
3717 spin_lock(&dentry_attach_lock);
3719 spin_unlock(&dentry_attach_lock);
3731 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3735 ocfs2_lock_res_refcount_tree(lockres);
3737 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3740 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3744 ocfs2_lock_res_refcount_tree(lockres);
3774 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3775 ocfs2_cluster_unlock(osb, lockres, level);
3784 struct buffer_head *bh = NULL;
3795 oinfo->dqi_gi.dqi_free_entry =
3811 oinfo->dqi_gi.dqi_free_entry =
3814 ocfs2_track_lock_refresh(lockres);
3831 if (ocfs2_is_hard_readonly(osb)) {
3836 if (ocfs2_mount_local(osb))
3839 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3844 if (!ocfs2_should_refresh_lock_res(lockres))
3847 status = ocfs2_refresh_qinfo(oinfo);
3850 ocfs2_complete_lock_res_refresh(lockres, status);
3863 if (ocfs2_is_hard_readonly(osb))
3866 if (ocfs2_mount_local(osb))
3869 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3882 if (!ocfs2_mount_local(osb))
3883 ocfs2_cluster_unlock(osb, lockres, level);
3886 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3891 unsigned long flags;
3910 spin_unlock_irqrestore(&lockres->l_lock, flags);
3912 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3921 ocfs2_schedule_blocked_lock(osb, lockres);
3925 spin_unlock_irqrestore(&lockres->l_lock, flags);
3928 && lockres->l_ops->post_unlock)
3929 lockres->l_ops->post_unlock(osb, lockres);
3932 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3935 unsigned long flags;
3943 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
3959 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3961 unsigned long processed;
3962 unsigned long flags;
3983 ocfs2_process_blocked_lock(osb, lockres);
3990 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3993 unsigned long flags;
4003 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4005 int should_wake = 0;
4006 unsigned long flags;
4016 static int ocfs2_downconvert_thread(void *arg)
4024 ocfs2_downconvert_thread_lists_empty(osb))) {
4027 ocfs2_downconvert_thread_should_wake(osb) ||
4030 mlog(0, "downconvert_thread: awoken\n");
4032 ocfs2_downconvert_thread_do_work(osb);
4041 unsigned long flags;