Linux Kernel  3.7.1
glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/buffer_head.h>
14 #include <linux/delay.h>
15 #include <linux/sort.h>
16 #include <linux/jhash.h>
17 #include <linux/kallsyms.h>
18 #include <linux/gfs2_ondisk.h>
19 #include <linux/list.h>
20 #include <linux/wait.h>
21 #include <linux/module.h>
22 #include <asm/uaccess.h>
23 #include <linux/seq_file.h>
24 #include <linux/debugfs.h>
25 #include <linux/kthread.h>
26 #include <linux/freezer.h>
27 #include <linux/workqueue.h>
28 #include <linux/jiffies.h>
29 #include <linux/rcupdate.h>
30 #include <linux/rculist_bl.h>
31 #include <linux/bit_spinlock.h>
32 #include <linux/percpu.h>
33 
34 #include "gfs2.h"
35 #include "incore.h"
36 #include "glock.h"
37 #include "glops.h"
38 #include "inode.h"
39 #include "lops.h"
40 #include "meta_io.h"
41 #include "quota.h"
42 #include "super.h"
43 #include "util.h"
44 #include "bmap.h"
45 #define CREATE_TRACE_POINTS
46 #include "trace_gfs2.h"
47 
48 struct gfs2_glock_iter {
49  int hash; /* hash bucket index */
50  unsigned nhash; /* Index within current bucket */
51  struct gfs2_sbd *sdp; /* incore superblock */
52  struct gfs2_glock *gl; /* current glock struct */
53  loff_t last_pos; /* last position */
54 };
55 
56 typedef void (*glock_examiner) (struct gfs2_glock * gl);
57 
58 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
59 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
61 
62 static struct dentry *gfs2_root;
63 static struct workqueue_struct *glock_workqueue;
64 struct workqueue_struct *gfs2_delete_workqueue;
65 static LIST_HEAD(lru_list);
66 static atomic_t lru_count = ATOMIC_INIT(0);
67 static DEFINE_SPINLOCK(lru_lock);
68 
69 #define GFS2_GL_HASH_SHIFT 15
70 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
71 #define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)
72 
73 static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
74 static struct dentry *gfs2_root;
75 
83 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
84  const struct lm_lockname *name)
85 {
86  unsigned int h;
87 
88  h = jhash(&name->ln_number, sizeof(u64), 0);
89  h = jhash(&name->ln_type, sizeof(unsigned int), h);
90  h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
91  h &= GFS2_GL_HASH_MASK;
92 
93  return h;
94 }
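/*
 * [Editorial note, not part of the original glock.c] gl_hash() mixes the
 * lock number, the lock type and the superblock pointer through jhash()
 * and masks the result to one of GFS2_GL_HASH_SIZE (1 << 15 = 32768)
 * buckets.  A minimal sketch of a lookup built on it (values illustrative
 * only):
 *
 *	struct lm_lockname name = { .ln_number = 42, .ln_type = LM_TYPE_INODE };
 *	unsigned int bucket = gl_hash(sdp, &name);	// 0 <= bucket < 32768
 *	spin_lock_bucket(bucket);	// bit-lock that hash chain (helpers below)
 *	...walk gl_hash_table[bucket]...
 *	spin_unlock_bucket(bucket);
 */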
95 
96 static inline void spin_lock_bucket(unsigned int hash)
97 {
98  hlist_bl_lock(&gl_hash_table[hash]);
99 }
100 
101 static inline void spin_unlock_bucket(unsigned int hash)
102 {
103  hlist_bl_unlock(&gl_hash_table[hash]);
104 }
105 
106 static void gfs2_glock_dealloc(struct rcu_head *rcu)
107 {
108  struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
109 
110  if (gl->gl_ops->go_flags & GLOF_ASPACE)
111  kmem_cache_free(gfs2_glock_aspace_cachep, gl);
112  else
113  kmem_cache_free(gfs2_glock_cachep, gl);
114 }
115 
116 void gfs2_glock_free(struct gfs2_glock *gl)
117 {
118  struct gfs2_sbd *sdp = gl->gl_sbd;
119 
120  call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
121  if (atomic_dec_and_test(&sdp->sd_glock_disposal))
122  wake_up(&sdp->sd_glock_wait);
123 }
124 
131 void gfs2_glock_hold(struct gfs2_glock *gl)
132 {
133  GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
134  atomic_inc(&gl->gl_ref);
135 }
136 
144 static int demote_ok(const struct gfs2_glock *gl)
145 {
146  const struct gfs2_glock_operations *glops = gl->gl_ops;
147 
148  if (gl->gl_state == LM_ST_UNLOCKED)
149  return 0;
150  if (!list_empty(&gl->gl_holders))
151  return 0;
152  if (glops->go_demote_ok)
153  return glops->go_demote_ok(gl);
154  return 1;
155 }
156 
157 
158 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
159 {
160  spin_lock(&lru_lock);
161 
162  if (!list_empty(&gl->gl_lru))
163  list_del_init(&gl->gl_lru);
164  else
165  atomic_inc(&lru_count);
166 
167  list_add_tail(&gl->gl_lru, &lru_list);
168  set_bit(GLF_LRU, &gl->gl_flags);
169  spin_unlock(&lru_lock);
170 }
171 
172 static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
173 {
174  if (!list_empty(&gl->gl_lru)) {
175  list_del_init(&gl->gl_lru);
176  atomic_dec(&lru_count);
177  clear_bit(GLF_LRU, &gl->gl_flags);
178  }
179 }
180 
181 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
182 {
183  spin_lock(&lru_lock);
184  __gfs2_glock_remove_from_lru(gl);
185  spin_unlock(&lru_lock);
186 }
187 
196 void gfs2_glock_put_nolock(struct gfs2_glock *gl)
197 {
198  if (atomic_dec_and_test(&gl->gl_ref))
199  GLOCK_BUG_ON(gl, 1);
200 }
201 
208 void gfs2_glock_put(struct gfs2_glock *gl)
209 {
210  struct gfs2_sbd *sdp = gl->gl_sbd;
211  struct address_space *mapping = gfs2_glock2aspace(gl);
212 
213  if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
214  __gfs2_glock_remove_from_lru(gl);
215  spin_unlock(&lru_lock);
216  spin_lock_bucket(gl->gl_hash);
217  hlist_bl_del_rcu(&gl->gl_list);
218  spin_unlock_bucket(gl->gl_hash);
219  GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
220  GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
221  trace_gfs2_glock_put(gl);
222  sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
223  }
224 }
225 
234 static struct gfs2_glock *search_bucket(unsigned int hash,
235  const struct gfs2_sbd *sdp,
236  const struct lm_lockname *name)
237 {
238  struct gfs2_glock *gl;
239  struct hlist_bl_node *h;
240 
241  hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
242  if (!lm_name_equal(&gl->gl_name, name))
243  continue;
244  if (gl->gl_sbd != sdp)
245  continue;
246  if (atomic_inc_not_zero(&gl->gl_ref))
247  return gl;
248  }
249 
250  return NULL;
251 }
252 
261 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
262 {
263  const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
264  if ((gh->gh_state == LM_ST_EXCLUSIVE ||
265  gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
266  return 0;
267  if (gl->gl_state == gh->gh_state)
268  return 1;
269  if (gh->gh_flags & GL_EXACT)
270  return 0;
271  if (gl->gl_state == LM_ST_EXCLUSIVE) {
272  if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
273  return 1;
274  if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
275  return 1;
276  }
277  if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
278  return 1;
279  return 0;
280 }
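/*
 * [Editorial note, not part of the original glock.c] may_grant() encodes
 * the glock compatibility rules used by do_promote(): a request is granted
 * if it matches the current glock state, if the glock is EX and both the
 * queue head and the request want the same SH or DF mode, or if
 * LM_FLAG_ANY is set and the glock is not unlocked; GL_EXACT forbids
 * anything but an exact state match, and an exclusive request (or any
 * request queued behind an exclusive head) is refused unless it is itself
 * at the head of the queue.  For example, an SH request against a glock
 * already held SH is granted immediately, while an EX request waits until
 * it is at the head and do_xmote() has moved the glock to EX.
 */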
281 
282 static void gfs2_holder_wake(struct gfs2_holder *gh)
283 {
284  clear_bit(HIF_WAIT, &gh->gh_iflags);
285  smp_mb__after_clear_bit();
286  wake_up_bit(&gh->gh_iflags, HIF_WAIT);
287 }
288 
294 static inline void do_error(struct gfs2_glock *gl, const int ret)
295 {
296  struct gfs2_holder *gh, *tmp;
297 
298  list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
299  if (test_bit(HIF_HOLDER, &gh->gh_iflags))
300  continue;
301  if (ret & LM_OUT_ERROR)
302  gh->gh_error = -EIO;
303  else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
304  gh->gh_error = GLR_TRYFAILED;
305  else
306  continue;
307  list_del_init(&gh->gh_list);
308  trace_gfs2_glock_queue(gh, 0);
309  gfs2_holder_wake(gh);
310  }
311 }
312 
321 static int do_promote(struct gfs2_glock *gl)
322 __releases(&gl->gl_spin)
323 __acquires(&gl->gl_spin)
324 {
325  const struct gfs2_glock_operations *glops = gl->gl_ops;
326  struct gfs2_holder *gh, *tmp;
327  int ret;
328 
329 restart:
330  list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
331  if (test_bit(HIF_HOLDER, &gh->gh_iflags))
332  continue;
333  if (may_grant(gl, gh)) {
334  if (gh->gh_list.prev == &gl->gl_holders &&
335  glops->go_lock) {
336  spin_unlock(&gl->gl_spin);
337  /* FIXME: eliminate this eventually */
338  ret = glops->go_lock(gh);
339  spin_lock(&gl->gl_spin);
340  if (ret) {
341  if (ret == 1)
342  return 2;
343  gh->gh_error = ret;
344  list_del_init(&gh->gh_list);
345  trace_gfs2_glock_queue(gh, 0);
346  gfs2_holder_wake(gh);
347  goto restart;
348  }
349  set_bit(HIF_HOLDER, &gh->gh_iflags);
350  trace_gfs2_promote(gh, 1);
351  gfs2_holder_wake(gh);
352  goto restart;
353  }
354  set_bit(HIF_HOLDER, &gh->gh_iflags);
355  trace_gfs2_promote(gh, 0);
356  gfs2_holder_wake(gh);
357  continue;
358  }
359  if (gh->gh_list.prev == &gl->gl_holders)
360  return 1;
361  do_error(gl, 0);
362  break;
363  }
364  return 0;
365 }
366 
372 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
373 {
374  struct gfs2_holder *gh;
375 
376  list_for_each_entry(gh, &gl->gl_holders, gh_list) {
377  if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
378  return gh;
379  }
380  return NULL;
381 }
382 
390 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
391 {
392  int held1, held2;
393 
394  held1 = (gl->gl_state != LM_ST_UNLOCKED);
395  held2 = (new_state != LM_ST_UNLOCKED);
396 
397  if (held1 != held2) {
398  if (held2)
399  gfs2_glock_hold(gl);
400  else
401  gfs2_glock_put_nolock(gl);
402  }
403  if (held1 && held2 && list_empty(&gl->gl_holders))
404  clear_bit(GLF_QUEUED, &gl->gl_flags);
405 
406  if (new_state != gl->gl_target)
407  /* shorten our minimum hold time */
408  gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
409  GL_GLOCK_MIN_HOLD);
410  gl->gl_state = new_state;
411  gl->gl_tchange = jiffies;
412 }
413 
414 static void gfs2_demote_wake(struct gfs2_glock *gl)
415 {
416  gl->gl_demote_state = LM_ST_EXCLUSIVE;
417  clear_bit(GLF_DEMOTE, &gl->gl_flags);
418  smp_mb__after_clear_bit();
419  wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
420 }
421 
429 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
430 {
431  const struct gfs2_glock_operations *glops = gl->gl_ops;
432  struct gfs2_holder *gh;
433  unsigned state = ret & LM_OUT_ST_MASK;
434  int rv;
435 
436  spin_lock(&gl->gl_spin);
437  trace_gfs2_glock_state_change(gl, state);
438  state_change(gl, state);
439  gh = find_first_waiter(gl);
440 
441  /* Demote to UN request arrived during demote to SH or DF */
442  if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
443  state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
444  gl->gl_target = LM_ST_UNLOCKED;
445 
446  /* Check for state != intended state */
447  if (unlikely(state != gl->gl_target)) {
448  if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
449  /* move to back of queue and try next entry */
450  if (ret & LM_OUT_CANCELED) {
451  if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
452  list_move_tail(&gh->gh_list, &gl->gl_holders);
453  gh = find_first_waiter(gl);
454  gl->gl_target = gh->gh_state;
455  goto retry;
456  }
457  /* Some error or failed "try lock" - report it */
458  if ((ret & LM_OUT_ERROR) ||
459  (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
460  gl->gl_target = gl->gl_state;
461  do_error(gl, ret);
462  goto out;
463  }
464  }
465  switch(state) {
466  /* Unlocked due to conversion deadlock, try again */
467  case LM_ST_UNLOCKED:
468 retry:
469  do_xmote(gl, gh, gl->gl_target);
470  break;
471  /* Conversion fails, unlock and try again */
472  case LM_ST_SHARED:
473  case LM_ST_DEFERRED:
474  do_xmote(gl, gh, LM_ST_UNLOCKED);
475  break;
476  default: /* Everything else */
477  printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
478  GLOCK_BUG_ON(gl, 1);
479  }
480  spin_unlock(&gl->gl_spin);
481  return;
482  }
483 
484  /* Fast path - we got what we asked for */
485  if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
486  gfs2_demote_wake(gl);
487  if (state != LM_ST_UNLOCKED) {
488  if (glops->go_xmote_bh) {
489  spin_unlock(&gl->gl_spin);
490  rv = glops->go_xmote_bh(gl, gh);
491  spin_lock(&gl->gl_spin);
492  if (rv) {
493  do_error(gl, rv);
494  goto out;
495  }
496  }
497  rv = do_promote(gl);
498  if (rv == 2)
499  goto out_locked;
500  }
501 out:
502  clear_bit(GLF_LOCK, &gl->gl_flags);
503 out_locked:
504  spin_unlock(&gl->gl_spin);
505 }
506 
515 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
516 __releases(&gl->gl_spin)
517 __acquires(&gl->gl_spin)
518 {
519  const struct gfs2_glock_operations *glops = gl->gl_ops;
520  struct gfs2_sbd *sdp = gl->gl_sbd;
521  unsigned int lck_flags = gh ? gh->gh_flags : 0;
522  int ret;
523 
524  lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
525  LM_FLAG_PRIORITY);
526  GLOCK_BUG_ON(gl, gl->gl_state == target);
527  GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
528  if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
529  glops->go_inval) {
530  set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
531  do_error(gl, 0); /* Fail queued try locks */
532  }
533  gl->gl_req = target;
534  set_bit(GLF_BLOCKING, &gl->gl_flags);
535  if ((gl->gl_req == LM_ST_UNLOCKED) ||
536  (gl->gl_state == LM_ST_EXCLUSIVE) ||
537  (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
538  clear_bit(GLF_BLOCKING, &gl->gl_flags);
539  spin_unlock(&gl->gl_spin);
540  if (glops->go_xmote_th)
541  glops->go_xmote_th(gl);
542  if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
543  glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
544  clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
545 
546  gfs2_glock_hold(gl);
547  if (sdp->sd_lockstruct.ls_ops->lm_lock) {
548  /* lock_dlm */
549  ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
550  GLOCK_BUG_ON(gl, ret);
551  } else { /* lock_nolock */
552  finish_xmote(gl, target);
553  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
554  gfs2_glock_put(gl);
555  }
556 
557  spin_lock(&gl->gl_spin);
558 }
559 
565 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
566 {
567  struct gfs2_holder *gh;
568 
569  if (!list_empty(&gl->gl_holders)) {
570  gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
571  if (test_bit(HIF_HOLDER, &gh->gh_iflags))
572  return gh;
573  }
574  return NULL;
575 }
576 
584 static void run_queue(struct gfs2_glock *gl, const int nonblock)
585 __releases(&gl->gl_spin)
586 __acquires(&gl->gl_spin)
587 {
588  struct gfs2_holder *gh = NULL;
589  int ret;
590 
591  if (test_bit(GLF_LOCK, &gl->gl_flags))
592  return;
593 
594  GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
595 
596  if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
597  gl->gl_demote_state != gl->gl_state) {
598  if (find_first_holder(gl))
599  goto out_unlock;
600  if (nonblock)
601  goto out_sched;
602  set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
603  GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
604  gl->gl_target = gl->gl_demote_state;
605  } else {
606  if (test_bit(GLF_DEMOTE, &gl->gl_flags))
607  gfs2_demote_wake(gl);
608  ret = do_promote(gl);
609  if (ret == 0)
610  goto out_unlock;
611  if (ret == 2)
612  goto out;
613  gh = find_first_waiter(gl);
614  gl->gl_target = gh->gh_state;
615  if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
616  do_error(gl, 0); /* Fail queued try locks */
617  }
618  do_xmote(gl, gh, gl->gl_target);
619 out:
620  return;
621 
622 out_sched:
623  clear_bit(GLF_LOCK, &gl->gl_flags);
624  smp_mb__after_clear_bit();
625  gfs2_glock_hold(gl);
626  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
627  gfs2_glock_put_nolock(gl);
628  return;
629 
630 out_unlock:
631  clear_bit(GLF_LOCK, &gl->gl_flags);
632  smp_mb__after_clear_bit();
633  return;
634 }
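/*
 * [Editorial note, not part of the original glock.c] run_queue() is the
 * core of the glock state machine: with gl_spin held it either starts a
 * pending demote (gl_target = gl_demote_state, then do_xmote()) or lets
 * do_promote() grant as many queued holders as may_grant() allows; work
 * that cannot be completed inline is deferred to glock_workqueue via
 * out_sched.
 */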
635 
636 static void delete_work_func(struct work_struct *work)
637 {
638  struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
639  struct gfs2_sbd *sdp = gl->gl_sbd;
640  struct gfs2_inode *ip;
641  struct inode *inode;
642  u64 no_addr = gl->gl_name.ln_number;
643 
644  ip = gl->gl_object;
645  /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
646 
647  if (ip)
648  inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
649  else
650  inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
651  if (inode && !IS_ERR(inode)) {
652  d_prune_aliases(inode);
653  iput(inode);
654  }
655  gfs2_glock_put(gl);
656 }
657 
658 static void glock_work_func(struct work_struct *work)
659 {
660  unsigned long delay = 0;
661  struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
662  int drop_ref = 0;
663 
664  if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
665  finish_xmote(gl, gl->gl_reply);
666  drop_ref = 1;
667  }
668  spin_lock(&gl->gl_spin);
669  if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
670  gl->gl_state != LM_ST_UNLOCKED &&
671  gl->gl_demote_state != LM_ST_EXCLUSIVE) {
672  unsigned long holdtime, now = jiffies;
673 
674  holdtime = gl->gl_tchange + gl->gl_hold_time;
675  if (time_before(now, holdtime))
676  delay = holdtime - now;
677 
678  if (!delay) {
679  clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
680  set_bit(GLF_DEMOTE, &gl->gl_flags);
681  }
682  }
683  run_queue(gl, 0);
684  spin_unlock(&gl->gl_spin);
685  if (!delay)
686  gfs2_glock_put(gl);
687  else {
688  if (gl->gl_name.ln_type != LM_TYPE_INODE)
689  delay = 0;
690  if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
691  gfs2_glock_put(gl);
692  }
693  if (drop_ref)
694  gfs2_glock_put(gl);
695 }
696 
710 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
711  const struct gfs2_glock_operations *glops, int create,
712  struct gfs2_glock **glp)
713 {
714  struct super_block *s = sdp->sd_vfs;
715  struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
716  struct gfs2_glock *gl, *tmp;
717  unsigned int hash = gl_hash(sdp, &name);
718  struct address_space *mapping;
719  struct kmem_cache *cachep;
720 
721  rcu_read_lock();
722  gl = search_bucket(hash, sdp, &name);
723  rcu_read_unlock();
724 
725  *glp = gl;
726  if (gl)
727  return 0;
728  if (!create)
729  return -ENOENT;
730 
731  if (glops->go_flags & GLOF_ASPACE)
732  cachep = gfs2_glock_aspace_cachep;
733  else
734  cachep = gfs2_glock_cachep;
735  gl = kmem_cache_alloc(cachep, GFP_KERNEL);
736  if (!gl)
737  return -ENOMEM;
738 
739  atomic_inc(&sdp->sd_glock_disposal);
740  gl->gl_sbd = sdp;
741  gl->gl_flags = 0;
742  gl->gl_name = name;
743  atomic_set(&gl->gl_ref, 1);
744  gl->gl_state = LM_ST_UNLOCKED;
745  gl->gl_target = LM_ST_UNLOCKED;
746  gl->gl_demote_state = LM_ST_EXCLUSIVE;
747  gl->gl_hash = hash;
748  gl->gl_ops = glops;
749  gl->gl_dstamp = ktime_set(0, 0);
750  preempt_disable();
751  /* We use the global stats to estimate the initial per-glock stats */
752  gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
753  preempt_enable();
754  gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
755  gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
756  memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
757  memset(gl->gl_lvb, 0, 32 * sizeof(char));
758  gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
759  gl->gl_tchange = jiffies;
760  gl->gl_object = NULL;
761  gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
762  INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
763  INIT_WORK(&gl->gl_delete, delete_work_func);
764 
765  mapping = gfs2_glock2aspace(gl);
766  if (mapping) {
767  mapping->a_ops = &gfs2_meta_aops;
768  mapping->host = s->s_bdev->bd_inode;
769  mapping->flags = 0;
770  mapping_set_gfp_mask(mapping, GFP_NOFS);
771  mapping->assoc_mapping = NULL;
772  mapping->backing_dev_info = s->s_bdi;
773  mapping->writeback_index = 0;
774  }
775 
776  spin_lock_bucket(hash);
777  tmp = search_bucket(hash, sdp, &name);
778  if (tmp) {
779  spin_unlock_bucket(hash);
780  kmem_cache_free(cachep, gl);
781  atomic_dec(&sdp->sd_glock_disposal);
782  gl = tmp;
783  } else {
784  hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
785  spin_unlock_bucket(hash);
786  }
787 
788  *glp = gl;
789 
790  return 0;
791 }
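/*
 * [Editorial note, not part of the original glock.c] A minimal sketch of
 * obtaining a glock for block "no_addr" (variable names illustrative, not
 * taken from a particular caller):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...use the glock, normally through a gfs2_holder...
 *	gfs2_glock_put(gl);	// drop the reference gfs2_glock_get() returned
 */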
792 
802 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
803  struct gfs2_holder *gh)
804 {
805  INIT_LIST_HEAD(&gh->gh_list);
806  gh->gh_gl = gl;
807  gh->gh_ip = (unsigned long)__builtin_return_address(0);
808  gh->gh_owner_pid = get_pid(task_pid(current));
809  gh->gh_state = state;
810  gh->gh_flags = flags;
811  gh->gh_error = 0;
812  gh->gh_iflags = 0;
813  gfs2_glock_hold(gl);
814 }
815 
826 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
827 {
828  gh->gh_state = state;
829  gh->gh_flags = flags;
830  gh->gh_iflags = 0;
831  gh->gh_ip = (unsigned long)__builtin_return_address(0);
832  if (gh->gh_owner_pid)
833  put_pid(gh->gh_owner_pid);
834  gh->gh_owner_pid = get_pid(task_pid(current));
835 }
836 
843 void gfs2_holder_uninit(struct gfs2_holder *gh)
844 {
845  put_pid(gh->gh_owner_pid);
846  gfs2_glock_put(gh->gh_gl);
847  gh->gh_gl = NULL;
848  gh->gh_ip = 0;
849 }
850 
860 static int gfs2_glock_holder_wait(void *word)
861 {
862  schedule();
863  return 0;
864 }
865 
866 static int gfs2_glock_demote_wait(void *word)
867 {
868  schedule();
869  return 0;
870 }
871 
879 int gfs2_glock_wait(struct gfs2_holder *gh)
880 {
881  unsigned long time1 = jiffies;
882 
883  might_sleep();
884  wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
885  if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
886  /* Lengthen the minimum hold time. */
887  gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
888  GL_GLOCK_HOLD_INCR,
889  GL_GLOCK_MAX_HOLD);
890  return gh->gh_error;
891 }
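/*
 * [Editorial note, not part of the original glock.c] If a holder had to
 * wait more than a second, gfs2_glock_wait() raises the glock's minimum
 * hold time (capped at GL_GLOCK_MAX_HOLD), so a contended glock stays
 * cached a little longer before being demoted; state_change() lowers
 * gl_hold_time again whenever the glock lands in a state other than the
 * one that was requested.
 */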
892 
902 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
903  unsigned long delay)
904 {
905  int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
906 
907  set_bit(bit, &gl->gl_flags);
908  if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
909  gl->gl_demote_state = state;
910  gl->gl_demote_time = jiffies;
911  } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
912  gl->gl_demote_state != state) {
913  gl->gl_demote_state = LM_ST_UNLOCKED;
914  }
915  if (gl->gl_ops->go_callback)
916  gl->gl_ops->go_callback(gl);
917  trace_gfs2_demote_rq(gl);
918 }
919 
920 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
921 {
922  struct va_format vaf;
923  va_list args;
924 
925  va_start(args, fmt);
926 
927  if (seq) {
928  seq_vprintf(seq, fmt, args);
929  } else {
930  vaf.fmt = fmt;
931  vaf.va = &args;
932 
933  printk(KERN_ERR " %pV", &vaf);
934  }
935 
936  va_end(args);
937 }
938 
949 static inline void add_to_queue(struct gfs2_holder *gh)
950 __releases(&gl->gl_spin)
951 __acquires(&gl->gl_spin)
952 {
953  struct gfs2_glock *gl = gh->gh_gl;
954  struct gfs2_sbd *sdp = gl->gl_sbd;
955  struct list_head *insert_pt = NULL;
956  struct gfs2_holder *gh2;
957  int try_futile = 0;
958 
959  BUG_ON(gh->gh_owner_pid == NULL);
960  if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
961  BUG();
962 
963  if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
964  if (test_bit(GLF_LOCK, &gl->gl_flags))
965  try_futile = !may_grant(gl, gh);
966  if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
967  goto fail;
968  }
969 
970  list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
971  if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
972  (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
973  goto trap_recursive;
974  if (try_futile &&
975  !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
976 fail:
977  gh->gh_error = GLR_TRYFAILED;
978  gfs2_holder_wake(gh);
979  return;
980  }
981  if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
982  continue;
983  if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
984  insert_pt = &gh2->gh_list;
985  }
986  set_bit(GLF_QUEUED, &gl->gl_flags);
987  trace_gfs2_glock_queue(gh, 1);
988  gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
989  gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
990  if (likely(insert_pt == NULL)) {
991  list_add_tail(&gh->gh_list, &gl->gl_holders);
992  if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
993  goto do_cancel;
994  return;
995  }
996  list_add_tail(&gh->gh_list, insert_pt);
997 do_cancel:
998  gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
999  if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1000  spin_unlock(&gl->gl_spin);
1001  if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1002  sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1003  spin_lock(&gl->gl_spin);
1004  }
1005  return;
1006 
1007 trap_recursive:
1008  print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1009  printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1010  printk(KERN_ERR "lock type: %d req lock state : %d\n",
1011  gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1012  print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1013  printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1014  printk(KERN_ERR "lock type: %d req lock state : %d\n",
1015  gh->gh_gl->gl_name.ln_type, gh->gh_state);
1016  __dump_glock(NULL, gl);
1017  BUG();
1018 }
1019 
1029 int gfs2_glock_nq(struct gfs2_holder *gh)
1030 {
1031  struct gfs2_glock *gl = gh->gh_gl;
1032  struct gfs2_sbd *sdp = gl->gl_sbd;
1033  int error = 0;
1034 
1035  if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1036  return -EIO;
1037 
1038  if (test_bit(GLF_LRU, &gl->gl_flags))
1039  gfs2_glock_remove_from_lru(gl);
1040 
1041  spin_lock(&gl->gl_spin);
1042  add_to_queue(gh);
1043  if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1044  test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1045  set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1046  run_queue(gl, 1);
1047  spin_unlock(&gl->gl_spin);
1048 
1049  if (!(gh->gh_flags & GL_ASYNC))
1050  error = gfs2_glock_wait(gh);
1051 
1052  return error;
1053 }
1054 
1062 int gfs2_glock_poll(struct gfs2_holder *gh)
1063 {
1064  return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1065 }
1066 
1073 void gfs2_glock_dq(struct gfs2_holder *gh)
1074 {
1075  struct gfs2_glock *gl = gh->gh_gl;
1076  const struct gfs2_glock_operations *glops = gl->gl_ops;
1077  unsigned delay = 0;
1078  int fast_path = 0;
1079 
1080  spin_lock(&gl->gl_spin);
1081  if (gh->gh_flags & GL_NOCACHE)
1082  handle_callback(gl, LM_ST_UNLOCKED, 0);
1083 
1084  list_del_init(&gh->gh_list);
1085  if (find_first_holder(gl) == NULL) {
1086  if (glops->go_unlock) {
1087  GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1088  spin_unlock(&gl->gl_spin);
1089  glops->go_unlock(gh);
1090  spin_lock(&gl->gl_spin);
1091  clear_bit(GLF_LOCK, &gl->gl_flags);
1092  }
1093  if (list_empty(&gl->gl_holders) &&
1094  !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1095  !test_bit(GLF_DEMOTE, &gl->gl_flags))
1096  fast_path = 1;
1097  }
1098  if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
1099  gfs2_glock_add_to_lru(gl);
1100 
1101  trace_gfs2_glock_queue(gh, 0);
1102  spin_unlock(&gl->gl_spin);
1103  if (likely(fast_path))
1104  return;
1105 
1106  gfs2_glock_hold(gl);
1107  if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1108  !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1109  gl->gl_name.ln_type == LM_TYPE_INODE)
1110  delay = gl->gl_hold_time;
1111  if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1112  gfs2_glock_put(gl);
1113 }
1114 
1115 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1116 {
1117  struct gfs2_glock *gl = gh->gh_gl;
1118  gfs2_glock_dq(gh);
1119  might_sleep();
1120  wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
1121 }
1122 
1129 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1130 {
1131  gfs2_glock_dq(gh);
1132  gfs2_holder_uninit(gh);
1133 }
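/*
 * [Editorial note, not part of the original glock.c] The usual holder life
 * cycle built from the functions above (error handling trimmed):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	// queue; without GL_ASYNC this waits
 *	if (!error) {
 *		...locked section...
 *		gfs2_glock_dq(&gh);	// release the lock
 *	}
 *	gfs2_holder_uninit(&gh);	// drop the glock reference
 *
 * gfs2_glock_nq_init() (see glock.h) and gfs2_glock_dq_uninit() above wrap
 * the init+nq and dq+uninit pairs respectively.
 */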
1134 
1147 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1148  const struct gfs2_glock_operations *glops,
1149  unsigned int state, int flags, struct gfs2_holder *gh)
1150 {
1151  struct gfs2_glock *gl;
1152  int error;
1153 
1154  error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1155  if (!error) {
1156  error = gfs2_glock_nq_init(gl, state, flags, gh);
1157  gfs2_glock_put(gl);
1158  }
1159 
1160  return error;
1161 }
1162 
1170 static int glock_compare(const void *arg_a, const void *arg_b)
1171 {
1172  const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1173  const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1174  const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1175  const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1176 
1177  if (a->ln_number > b->ln_number)
1178  return 1;
1179  if (a->ln_number < b->ln_number)
1180  return -1;
1181  BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1182  return 0;
1183 }
1184 
1194 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1195  struct gfs2_holder **p)
1196 {
1197  unsigned int x;
1198  int error = 0;
1199 
1200  for (x = 0; x < num_gh; x++)
1201  p[x] = &ghs[x];
1202 
1203  sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1204 
1205  for (x = 0; x < num_gh; x++) {
1206  p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1207 
1208  error = gfs2_glock_nq(p[x]);
1209  if (error) {
1210  while (x--)
1211  gfs2_glock_dq(p[x]);
1212  break;
1213  }
1214  }
1215 
1216  return error;
1217 }
1218 
1229 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1230 {
1231  struct gfs2_holder *tmp[4];
1232  struct gfs2_holder **pph = tmp;
1233  int error = 0;
1234 
1235  switch(num_gh) {
1236  case 0:
1237  return 0;
1238  case 1:
1239  ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1240  return gfs2_glock_nq(ghs);
1241  default:
1242  if (num_gh <= 4)
1243  break;
1244  pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1245  if (!pph)
1246  return -ENOMEM;
1247  }
1248 
1249  error = nq_m_sync(num_gh, ghs, pph);
1250 
1251  if (pph != tmp)
1252  kfree(pph);
1253 
1254  return error;
1255 }
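/*
 * [Editorial note, not part of the original glock.c] nq_m_sync() sorts the
 * holders by lock number (glock_compare) before acquiring them, so every
 * caller takes multiple glocks in the same global order and lock-ordering
 * deadlocks are avoided.  Sketch with two already-initialised holders:
 *
 *	struct gfs2_holder ghs[2];
 *	...gfs2_holder_init() both entries...
 *	error = gfs2_glock_nq_m(2, ghs);	// acquires both, in sorted order
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);	// releases both
 */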
1256 
1264 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1265 {
1266  while (num_gh--)
1267  gfs2_glock_dq(&ghs[num_gh]);
1268 }
1269 
1277 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1278 {
1279  while (num_gh--)
1280  gfs2_glock_dq_uninit(&ghs[num_gh]);
1281 }
1282 
1283 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1284 {
1285  unsigned long delay = 0;
1286  unsigned long holdtime;
1287  unsigned long now = jiffies;
1288 
1289  gfs2_glock_hold(gl);
1290  holdtime = gl->gl_tchange + gl->gl_hold_time;
1291  if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1292  gl->gl_name.ln_type == LM_TYPE_INODE) {
1293  if (time_before(now, holdtime))
1294  delay = holdtime - now;
1295  if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1296  delay = gl->gl_hold_time;
1297  }
1298 
1299  spin_lock(&gl->gl_spin);
1300  handle_callback(gl, state, delay);
1301  spin_unlock(&gl->gl_spin);
1302  if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1303  gfs2_glock_put(gl);
1304 }
1305 
1317 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1318 {
1319  const struct gfs2_holder *gh;
1320 
1321  if (gl->gl_reply & ~LM_OUT_ST_MASK)
1322  return 0;
1323  if (gl->gl_target == LM_ST_UNLOCKED)
1324  return 0;
1325 
1326  list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1327  if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1328  continue;
1329  if (LM_FLAG_NOEXP & gh->gh_flags)
1330  return 0;
1331  }
1332 
1333  return 1;
1334 }
1335 
1345 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1346 {
1347  struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1348 
1349  spin_lock(&gl->gl_spin);
1350  gl->gl_reply = ret;
1351 
1352  if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1353  if (gfs2_should_freeze(gl)) {
1354  set_bit(GLF_FROZEN, &gl->gl_flags);
1355  spin_unlock(&gl->gl_spin);
1356  return;
1357  }
1358  }
1359 
1360  spin_unlock(&gl->gl_spin);
1361  set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1362  smp_wmb();
1363  gfs2_glock_hold(gl);
1364  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1365  gfs2_glock_put(gl);
1366 }
1367 
1368 
1369 static int gfs2_shrink_glock_memory(struct shrinker *shrink,
1370  struct shrink_control *sc)
1371 {
1372  struct gfs2_glock *gl;
1373  int may_demote;
1374  int nr_skipped = 0;
1375  int nr = sc->nr_to_scan;
1376  gfp_t gfp_mask = sc->gfp_mask;
1377  LIST_HEAD(skipped);
1378 
1379  if (nr == 0)
1380  goto out;
1381 
1382  if (!(gfp_mask & __GFP_FS))
1383  return -1;
1384 
1385  spin_lock(&lru_lock);
1386  while(nr && !list_empty(&lru_list)) {
1387  gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1388  list_del_init(&gl->gl_lru);
1389  clear_bit(GLF_LRU, &gl->gl_flags);
1390  atomic_dec(&lru_count);
1391 
1392  /* Test for being demotable */
1393  if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1394  gfs2_glock_hold(gl);
1395  spin_unlock(&lru_lock);
1396  spin_lock(&gl->gl_spin);
1397  may_demote = demote_ok(gl);
1398  if (may_demote) {
1399  handle_callback(gl, LM_ST_UNLOCKED, 0);
1400  nr--;
1401  }
1402  clear_bit(GLF_LOCK, &gl->gl_flags);
1403  smp_mb__after_clear_bit();
1404  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1405  gfs2_glock_put_nolock(gl);
1406  spin_unlock(&gl->gl_spin);
1407  spin_lock(&lru_lock);
1408  continue;
1409  }
1410  nr_skipped++;
1411  list_add(&gl->gl_lru, &skipped);
1412  set_bit(GLF_LRU, &gl->gl_flags);
1413  }
1414  list_splice(&skipped, &lru_list);
1415  atomic_add(nr_skipped, &lru_count);
1416  spin_unlock(&lru_lock);
1417 out:
1418  return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1419 }
1420 
1421 static struct shrinker glock_shrinker = {
1422  .shrink = gfs2_shrink_glock_memory,
1423  .seeks = DEFAULT_SEEKS,
1424 };
1425 
1434 static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1435  unsigned int hash)
1436 {
1437  struct gfs2_glock *gl;
1438  struct hlist_bl_head *head = &gl_hash_table[hash];
1439  struct hlist_bl_node *pos;
1440 
1441  rcu_read_lock();
1442  hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1443  if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1444  examiner(gl);
1445  }
1446  rcu_read_unlock();
1447  cond_resched();
1448 }
1449 
1450 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1451 {
1452  unsigned x;
1453 
1454  for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1455  examine_bucket(examiner, sdp, x);
1456 }
1457 
1458 
1467 static void thaw_glock(struct gfs2_glock *gl)
1468 {
1469  if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1470  return;
1471  set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1472  gfs2_glock_hold(gl);
1473  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1474  gfs2_glock_put(gl);
1475 }
1476 
1483 static void clear_glock(struct gfs2_glock *gl)
1484 {
1485  gfs2_glock_remove_from_lru(gl);
1486 
1487  spin_lock(&gl->gl_spin);
1488  if (gl->gl_state != LM_ST_UNLOCKED)
1489  handle_callback(gl, LM_ST_UNLOCKED, 0);
1490  spin_unlock(&gl->gl_spin);
1491  gfs2_glock_hold(gl);
1492  if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1493  gfs2_glock_put(gl);
1494 }
1495 
1502 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1503 {
1504  glock_hash_walk(thaw_glock, sdp);
1505 }
1506 
1507 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1508 {
1509  int ret;
1510  spin_lock(&gl->gl_spin);
1511  ret = __dump_glock(seq, gl);
1512  spin_unlock(&gl->gl_spin);
1513  return ret;
1514 }
1515 
1516 static void dump_glock_func(struct gfs2_glock *gl)
1517 {
1518  dump_glock(NULL, gl);
1519 }
1520 
1529 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1530 {
1531  glock_hash_walk(clear_glock, sdp);
1532  flush_workqueue(glock_workqueue);
1533  wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1534  glock_hash_walk(dump_glock_func, sdp);
1535 }
1536 
1537 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1538 {
1539  struct gfs2_glock *gl = ip->i_gl;
1540  int ret;
1541 
1542  ret = gfs2_truncatei_resume(ip);
1543  gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1544 
1545  spin_lock(&gl->gl_spin);
1546  clear_bit(GLF_LOCK, &gl->gl_flags);
1547  run_queue(gl, 1);
1548  spin_unlock(&gl->gl_spin);
1549 }
1550 
1551 static const char *state2str(unsigned state)
1552 {
1553  switch(state) {
1554  case LM_ST_UNLOCKED:
1555  return "UN";
1556  case LM_ST_SHARED:
1557  return "SH";
1558  case LM_ST_DEFERRED:
1559  return "DF";
1560  case LM_ST_EXCLUSIVE:
1561  return "EX";
1562  }
1563  return "??";
1564 }
1565 
1566 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1567 {
1568  char *p = buf;
1569  if (flags & LM_FLAG_TRY)
1570  *p++ = 't';
1571  if (flags & LM_FLAG_TRY_1CB)
1572  *p++ = 'T';
1573  if (flags & LM_FLAG_NOEXP)
1574  *p++ = 'e';
1575  if (flags & LM_FLAG_ANY)
1576  *p++ = 'A';
1577  if (flags & LM_FLAG_PRIORITY)
1578  *p++ = 'p';
1579  if (flags & GL_ASYNC)
1580  *p++ = 'a';
1581  if (flags & GL_EXACT)
1582  *p++ = 'E';
1583  if (flags & GL_NOCACHE)
1584  *p++ = 'c';
1585  if (test_bit(HIF_HOLDER, &iflags))
1586  *p++ = 'H';
1587  if (test_bit(HIF_WAIT, &iflags))
1588  *p++ = 'W';
1589  if (test_bit(HIF_FIRST, &iflags))
1590  *p++ = 'F';
1591  *p = 0;
1592  return buf;
1593 }
1594 
1603 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1604 {
1605  struct task_struct *gh_owner = NULL;
1606  char flags_buf[32];
1607 
1608  if (gh->gh_owner_pid)
1609  gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1610  gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1611  state2str(gh->gh_state),
1612  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1613  gh->gh_error,
1614  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1615  gh_owner ? gh_owner->comm : "(ended)",
1616  (void *)gh->gh_ip);
1617  return 0;
1618 }
1619 
1620 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1621 {
1622  const unsigned long *gflags = &gl->gl_flags;
1623  char *p = buf;
1624 
1625  if (test_bit(GLF_LOCK, gflags))
1626  *p++ = 'l';
1627  if (test_bit(GLF_DEMOTE, gflags))
1628  *p++ = 'D';
1629  if (test_bit(GLF_PENDING_DEMOTE, gflags))
1630  *p++ = 'd';
1631  if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1632  *p++ = 'p';
1633  if (test_bit(GLF_DIRTY, gflags))
1634  *p++ = 'y';
1635  if (test_bit(GLF_LFLUSH, gflags))
1636  *p++ = 'f';
1637  if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1638  *p++ = 'i';
1639  if (test_bit(GLF_REPLY_PENDING, gflags))
1640  *p++ = 'r';
1641  if (test_bit(GLF_INITIAL, gflags))
1642  *p++ = 'I';
1643  if (test_bit(GLF_FROZEN, gflags))
1644  *p++ = 'F';
1645  if (test_bit(GLF_QUEUED, gflags))
1646  *p++ = 'q';
1647  if (test_bit(GLF_LRU, gflags))
1648  *p++ = 'L';
1649  if (gl->gl_object)
1650  *p++ = 'o';
1651  if (test_bit(GLF_BLOCKING, gflags))
1652  *p++ = 'b';
1653  *p = 0;
1654  return buf;
1655 }
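/*
 * [Editorial note, not part of the original glock.c] The single-letter
 * codes above form the f: field of the debugfs "glocks" output.  For
 * instance "f:lDq" means the glock is locked (GLF_LOCK, 'l'), has a demote
 * request pending (GLF_DEMOTE, 'D') and has had holders queued
 * (GLF_QUEUED, 'q'); holder lines use the separate table in hflags2str()
 * above.
 */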
1656 
1675 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1676 {
1677  const struct gfs2_glock_operations *glops = gl->gl_ops;
1678  unsigned long long dtime;
1679  const struct gfs2_holder *gh;
1680  char gflags_buf[32];
1681  int error = 0;
1682 
1683  dtime = jiffies - gl->gl_demote_time;
1684  dtime *= 1000000/HZ; /* demote time in uSec */
1685  if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1686  dtime = 0;
1687  gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1688  state2str(gl->gl_state),
1689  gl->gl_name.ln_type,
1690  (unsigned long long)gl->gl_name.ln_number,
1691  gflags2str(gflags_buf, gl),
1692  state2str(gl->gl_target),
1693  state2str(gl->gl_demote_state), dtime,
1694  atomic_read(&gl->gl_ail_count),
1695  atomic_read(&gl->gl_revokes),
1696  atomic_read(&gl->gl_ref), gl->gl_hold_time);
1697 
1698  list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1699  error = dump_holder(seq, gh);
1700  if (error)
1701  goto out;
1702  }
1703  if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1704  error = glops->go_dump(seq, gl);
1705 out:
1706  return error;
1707 }
1708 
1709 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1710 {
1711  struct gfs2_glock *gl = iter_ptr;
1712 
1713  seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
1714  gl->gl_name.ln_type,
1715  (unsigned long long)gl->gl_name.ln_number,
1716  (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1717  (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1718  (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1719  (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1720  (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1721  (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1722  (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1723  (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1724  return 0;
1725 }
1726 
1727 static const char *gfs2_gltype[] = {
1728  "type",
1729  "reserved",
1730  "nondisk",
1731  "inode",
1732  "rgrp",
1733  "meta",
1734  "iopen",
1735  "flock",
1736  "plock",
1737  "quota",
1738  "journal",
1739 };
1740 
1741 static const char *gfs2_stype[] = {
1742  [GFS2_LKS_SRTT] = "srtt",
1743  [GFS2_LKS_SRTTVAR] = "srttvar",
1744  [GFS2_LKS_SRTTB] = "srttb",
1745  [GFS2_LKS_SRTTVARB] = "srttvarb",
1746  [GFS2_LKS_SIRT] = "sirt",
1747  [GFS2_LKS_SIRTVAR] = "sirtvar",
1748  [GFS2_LKS_DCOUNT] = "dlm",
1749  [GFS2_LKS_QCOUNT] = "queue",
1750 };
1751 
1752 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1753 
1754 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1755 {
1756  struct gfs2_glock_iter *gi = seq->private;
1757  struct gfs2_sbd *sdp = gi->sdp;
1758  unsigned index = gi->hash >> 3;
1759  unsigned subindex = gi->hash & 0x07;
1760  s64 value;
1761  int i;
1762 
1763  if (index == 0 && subindex != 0)
1764  return 0;
1765 
1766  seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1767  (index == 0) ? "cpu": gfs2_stype[subindex]);
1768 
1769  for_each_possible_cpu(i) {
1770  const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1771  if (index == 0) {
1772  value = i;
1773  } else {
1774  value = lkstats->lkstats[index - 1].stats[subindex];
1775  }
1776  seq_printf(seq, " %15lld", (long long)value);
1777  }
1778  seq_putc(seq, '\n');
1779  return 0;
1780 }
1781 
1782 int __init gfs2_glock_init(void)
1783 {
1784  unsigned i;
1785  for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1786  INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1787  }
1788 
1789  glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1790  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1791  if (IS_ERR(glock_workqueue))
1792  return PTR_ERR(glock_workqueue);
1793  gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1794  WQ_MEM_RECLAIM | WQ_FREEZABLE,
1795  0);
1796  if (IS_ERR(gfs2_delete_workqueue)) {
1797  destroy_workqueue(glock_workqueue);
1798  return PTR_ERR(gfs2_delete_workqueue);
1799  }
1800 
1801  register_shrinker(&glock_shrinker);
1802 
1803  return 0;
1804 }
1805 
1806 void gfs2_glock_exit(void)
1807 {
1808  unregister_shrinker(&glock_shrinker);
1809  destroy_workqueue(glock_workqueue);
1810  destroy_workqueue(gfs2_delete_workqueue);
1811 }
1812 
1813 static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1814 {
1815  return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1816  struct gfs2_glock, gl_list);
1817 }
1818 
1819 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1820 {
1821  return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1822  struct gfs2_glock, gl_list);
1823 }
1824 
1825 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1826 {
1827  struct gfs2_glock *gl;
1828 
1829  do {
1830  gl = gi->gl;
1831  if (gl) {
1832  gi->gl = glock_hash_next(gl);
1833  gi->nhash++;
1834  } else {
1835  if (gi->hash >= GFS2_GL_HASH_SIZE) {
1836  rcu_read_unlock();
1837  return 1;
1838  }
1839  gi->gl = glock_hash_chain(gi->hash);
1840  gi->nhash = 0;
1841  }
1842  while (gi->gl == NULL) {
1843  gi->hash++;
1844  if (gi->hash >= GFS2_GL_HASH_SIZE) {
1845  rcu_read_unlock();
1846  return 1;
1847  }
1848  gi->gl = glock_hash_chain(gi->hash);
1849  gi->nhash = 0;
1850  }
1851  /* Skip entries for other sb and dead entries */
1852  } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1853 
1854  return 0;
1855 }
1856 
1857 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1858 {
1859  struct gfs2_glock_iter *gi = seq->private;
1860  loff_t n = *pos;
1861 
1862  if (gi->last_pos <= *pos)
1863  n = gi->nhash + (*pos - gi->last_pos);
1864  else
1865  gi->hash = 0;
1866 
1867  gi->nhash = 0;
1868  rcu_read_lock();
1869 
1870  do {
1871  if (gfs2_glock_iter_next(gi))
1872  return NULL;
1873  } while (n--);
1874 
1875  gi->last_pos = *pos;
1876  return gi->gl;
1877 }
1878 
1879 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1880  loff_t *pos)
1881 {
1882  struct gfs2_glock_iter *gi = seq->private;
1883 
1884  (*pos)++;
1885  gi->last_pos = *pos;
1886  if (gfs2_glock_iter_next(gi))
1887  return NULL;
1888 
1889  return gi->gl;
1890 }
1891 
1892 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1893 {
1894  struct gfs2_glock_iter *gi = seq->private;
1895 
1896  if (gi->gl)
1897  rcu_read_unlock();
1898  gi->gl = NULL;
1899 }
1900 
1901 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1902 {
1903  return dump_glock(seq, iter_ptr);
1904 }
1905 
1906 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1907 {
1908  struct gfs2_glock_iter *gi = seq->private;
1909 
1910  gi->hash = *pos;
1911  if (*pos >= GFS2_NR_SBSTATS)
1912  return NULL;
1913  preempt_disable();
1914  return SEQ_START_TOKEN;
1915 }
1916 
1917 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1918  loff_t *pos)
1919 {
1920  struct gfs2_glock_iter *gi = seq->private;
1921  (*pos)++;
1922  gi->hash++;
1923  if (gi->hash >= GFS2_NR_SBSTATS) {
1924  preempt_enable();
1925  return NULL;
1926  }
1927  return SEQ_START_TOKEN;
1928 }
1929 
1930 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1931 {
1932  preempt_enable();
1933 }
1934 
1935 static const struct seq_operations gfs2_glock_seq_ops = {
1936  .start = gfs2_glock_seq_start,
1937  .next = gfs2_glock_seq_next,
1938  .stop = gfs2_glock_seq_stop,
1939  .show = gfs2_glock_seq_show,
1940 };
1941 
1942 static const struct seq_operations gfs2_glstats_seq_ops = {
1943  .start = gfs2_glock_seq_start,
1944  .next = gfs2_glock_seq_next,
1945  .stop = gfs2_glock_seq_stop,
1946  .show = gfs2_glstats_seq_show,
1947 };
1948 
1949 static const struct seq_operations gfs2_sbstats_seq_ops = {
1950  .start = gfs2_sbstats_seq_start,
1951  .next = gfs2_sbstats_seq_next,
1952  .stop = gfs2_sbstats_seq_stop,
1953  .show = gfs2_sbstats_seq_show,
1954 };
1955 
1956 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
1957 
1958 static int gfs2_glocks_open(struct inode *inode, struct file *file)
1959 {
1960  int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1961  sizeof(struct gfs2_glock_iter));
1962  if (ret == 0) {
1963  struct seq_file *seq = file->private_data;
1964  struct gfs2_glock_iter *gi = seq->private;
1965  gi->sdp = inode->i_private;
1966  seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1967  if (seq->buf)
1968  seq->size = GFS2_SEQ_GOODSIZE;
1969  }
1970  return ret;
1971 }
1972 
1973 static int gfs2_glstats_open(struct inode *inode, struct file *file)
1974 {
1975  int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
1976  sizeof(struct gfs2_glock_iter));
1977  if (ret == 0) {
1978  struct seq_file *seq = file->private_data;
1979  struct gfs2_glock_iter *gi = seq->private;
1980  gi->sdp = inode->i_private;
1981  seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1982  if (seq->buf)
1983  seq->size = GFS2_SEQ_GOODSIZE;
1984  }
1985  return ret;
1986 }
1987 
1988 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
1989 {
1990  int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
1991  sizeof(struct gfs2_glock_iter));
1992  if (ret == 0) {
1993  struct seq_file *seq = file->private_data;
1994  struct gfs2_glock_iter *gi = seq->private;
1995  gi->sdp = inode->i_private;
1996  }
1997  return ret;
1998 }
1999 
2000 static const struct file_operations gfs2_glocks_fops = {
2001  .owner = THIS_MODULE,
2002  .open = gfs2_glocks_open,
2003  .read = seq_read,
2004  .llseek = seq_lseek,
2005  .release = seq_release_private,
2006 };
2007 
2008 static const struct file_operations gfs2_glstats_fops = {
2009  .owner = THIS_MODULE,
2010  .open = gfs2_glstats_open,
2011  .read = seq_read,
2012  .llseek = seq_lseek,
2013  .release = seq_release_private,
2014 };
2015 
2016 static const struct file_operations gfs2_sbstats_fops = {
2017  .owner = THIS_MODULE,
2018  .open = gfs2_sbstats_open,
2019  .read = seq_read,
2020  .llseek = seq_lseek,
2021  .release = seq_release_private,
2022 };
2023 
2024 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2025 {
2026  sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2027  if (!sdp->debugfs_dir)
2028  return -ENOMEM;
2029  sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2030  S_IFREG | S_IRUGO,
2031  sdp->debugfs_dir, sdp,
2032  &gfs2_glocks_fops);
2033  if (!sdp->debugfs_dentry_glocks)
2034  goto fail;
2035 
2036  sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
2037  S_IFREG | S_IRUGO,
2038  sdp->debugfs_dir, sdp,
2039  &gfs2_glstats_fops);
2040  if (!sdp->debugfs_dentry_glstats)
2041  goto fail;
2042 
2043  sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
2044  S_IFREG | S_IRUGO,
2045  sdp->debugfs_dir, sdp,
2046  &gfs2_sbstats_fops);
2047  if (!sdp->debugfs_dentry_sbstats)
2048  goto fail;
2049 
2050  return 0;
2051 fail:
2052  gfs2_delete_debugfs_file(sdp);
2053  return -ENOMEM;
2054 }
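/*
 * [Editorial note, not part of the original glock.c] With debugfs mounted
 * in its usual location this creates
 * /sys/kernel/debug/gfs2/<sd_table_name>/{glocks,glstats,sbstats}; the
 * parent "gfs2" directory itself is created by gfs2_register_debugfs()
 * below.
 */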
2055 
2056 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2057 {
2058  if (sdp->debugfs_dir) {
2059  if (sdp->debugfs_dentry_glocks) {
2060  debugfs_remove(sdp->debugfs_dentry_glocks);
2061  sdp->debugfs_dentry_glocks = NULL;
2062  }
2063  if (sdp->debugfs_dentry_glstats) {
2064  debugfs_remove(sdp->debugfs_dentry_glstats);
2065  sdp->debugfs_dentry_glstats = NULL;
2066  }
2067  if (sdp->debugfs_dentry_sbstats) {
2068  debugfs_remove(sdp->debugfs_dentry_sbstats);
2069  sdp->debugfs_dentry_sbstats = NULL;
2070  }
2071  debugfs_remove(sdp->debugfs_dir);
2072  sdp->debugfs_dir = NULL;
2073  }
2074 }
2075 
2076 int gfs2_register_debugfs(void)
2077 {
2078  gfs2_root = debugfs_create_dir("gfs2", NULL);
2079  return gfs2_root ? 0 : -ENOMEM;
2080 }
2081 
2082 void gfs2_unregister_debugfs(void)
2083 {
2084  debugfs_remove(gfs2_root);
2085  gfs2_root = NULL;
2086 }