/*
 * linux/list_bl.h — hlist variant with the bucket lock stored in bit 0
 * of the head pointer.  (Extracted from Linux kernel 3.7.1 sources;
 * Doxygen page-navigation text removed.)
 */
1 #ifndef _LINUX_LIST_BL_H
2 #define _LINUX_LIST_BL_H
3 
4 #include <linux/list.h>
5 #include <linux/bit_spinlock.h>
6 
7 /*
8  * Special version of lists, where head of the list has a lock in the lowest
9  * bit. This is useful for scalable hash tables without increasing memory
10  * footprint overhead.
11  *
12  * For modification operations, the 0 bit of hlist_bl_head->first
13  * pointer must be set.
14  *
15  * With some small modifications, this can easily be adapted to store several
16  * arbitrary bits (not just a single lock bit), if the need arises to store
17  * some fast and compact auxiliary data.
18  */
19 
/*
 * Mask selecting the lock bit inside hlist_bl_head->first.  On UP
 * builds without spinlock debugging, bit_spin_lock() degenerates to a
 * no-op, so no bit needs to be reserved and the mask is zero.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK 1UL
#else
#define LIST_BL_LOCKMASK 0UL
#endif
25 
/* Sanity checks compile to nothing unless CONFIG_DEBUG_LIST is set. */
#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif
31 
32 
/*
 * List head: bit 0 of 'first' doubles as the bucket lock
 * (see LIST_BL_LOCKMASK), so mask it off before dereferencing.
 *
 * NOTE(review): the 'first' member was missing from this definition
 * even though every accessor below reads and writes h->first —
 * restored here (matches the upstream kernel header).
 */
struct hlist_bl_head {
	struct hlist_bl_node *first;
};
36 
/*
 * List node: 'next' points at the following node; 'pprev' points back
 * at whichever pointer references this node — the previous node's
 * 'next', or the head's 'first' (possibly with the lock bit set).
 */
struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
/* Initialize a head to the empty list with the lock bit clear. */
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)
42 
43 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
44 {
45  h->next = NULL;
46  h->pprev = NULL;
47 }
48 
49 #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
50 
51 static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
52 {
53  return !h->pprev;
54 }
55 
56 static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
57 {
58  return (struct hlist_bl_node *)
59  ((unsigned long)h->first & ~LIST_BL_LOCKMASK);
60 }
61 
/*
 * Install @n as the first node while preserving the lock bit.
 * Caller must hold the head lock: the second BUG_ON checks the lock
 * bit is currently set in h->first, and the first checks @n itself is
 * suitably aligned (its bit 0 clear, so it cannot clobber the lock).
 */
static inline void hlist_bl_set_first(struct hlist_bl_head *h,
	struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
	LIST_BL_LOCKMASK);
	/* Publish the new first node with the lock bit kept set. */
	h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
}
70 
71 static inline int hlist_bl_empty(const struct hlist_bl_head *h)
72 {
73  return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
74 }
75 
76 static inline void hlist_bl_add_head(struct hlist_bl_node *n,
77  struct hlist_bl_head *h)
78 {
79  struct hlist_bl_node *first = hlist_bl_first(h);
80 
81  n->next = first;
82  if (first)
83  first->pprev = &n->next;
84  n->pprev = &h->first;
85  hlist_bl_set_first(h, n);
86 }
87 
/*
 * Unlink @n from its list without poisoning its pointers.
 * @n must be a real node (bit 0 clear, enforced by the BUG_ON).
 * The pointer @n->pprev points at may be the head's 'first', which
 * carries the lock bit — the masked OR below keeps that bit intact
 * while redirecting the predecessor to @n's successor.
 */
static inline void __hlist_bl_del(struct hlist_bl_node *n)
{
	struct hlist_bl_node *next = n->next;
	struct hlist_bl_node **pprev = n->pprev;

	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);

	/* pprev may be `first`, so be careful not to lose the lock bit */
	*pprev = (struct hlist_bl_node *)
	((unsigned long)next |
	((unsigned long)*pprev & LIST_BL_LOCKMASK));
	if (next)
		next->pprev = pprev;
}
102 
103 static inline void hlist_bl_del(struct hlist_bl_node *n)
104 {
105  __hlist_bl_del(n);
106  n->next = LIST_POISON1;
107  n->pprev = LIST_POISON2;
108 }
109 
/* Remove @n if it is hashed, leaving it reinitialized (unhashed). */
static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (hlist_bl_unhashed(n))
		return;
	__hlist_bl_del(n);
	INIT_HLIST_BL_NODE(n);
}
117 
/* Take the per-bucket lock stored in bit 0 of b->first. */
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	unsigned long *addr = (unsigned long *)b;

	bit_spin_lock(0, addr);
}
122 
/* Release the per-bucket lock in bit 0 of b->first (must be held). */
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	unsigned long *addr = (unsigned long *)b;

	__bit_spin_unlock(0, addr);
}
127 
/**
 * hlist_bl_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the &struct hlist_bl_head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * The statement-expression sets @tpos and yields 1, so the loop
 * condition is driven purely by @pos becoming NULL.
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member) \
	for (pos = hlist_bl_first(head); \
	pos && \
	({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	pos = pos->next)
141 
/**
 * hlist_bl_for_each_entry_safe - iterate over list, safe against
 *				  removal of the current entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @n:		another &struct hlist_bl_node used as temporary storage.
 * @head:	the &struct hlist_bl_head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * @n caches pos->next before the body runs, so the body may delete
 * the current entry without breaking the traversal.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = hlist_bl_first(head); \
	pos && ({ n = pos->next; 1; }) && \
	({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	pos = n)
155 
156 #endif