Linux Kernel 3.7.1
idr.c
1 /*
2  * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3  * Copyright (C) 2002 by Concurrent Computer Corporation
4  * Distributed under the GNU GPL license version 2.
5  *
6  * Modified by George Anzinger to reuse immediately and to use
7  * find bit instructions. Also removed _irq on spinlocks.
8  *
9  * Modified by Nadia Derbey to make it RCU safe.
10  *
11  * Small id to pointer translation service.
12  *
13  * It uses a radix tree like structure as a sparse array indexed
14  * by the id to obtain the pointer. The bitmap makes allocating
15  * a new id quick.
16  *
17  * You call it to allocate an id (an int) and associate with that id a
18  * pointer or whatever; we treat it as a (void *). You can pass this
19  * id to a user for them to pass back at a later time. You then pass
20  * that id to this code and it returns your pointer.
21  *
22  * You can release ids at any time. When all ids are released, most of
23  * the memory is returned (we keep MAX_IDR_FREE) in a local pool so we
24  * don't need to go to the memory "store" during an id allocate, and
25  * so you don't need to be too concerned about locking and conflicts
26  * with the slab allocator.
27  */
28 
29 #ifndef TEST // to test in user space...
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/export.h>
33 #endif
34 #include <linux/err.h>
35 #include <linux/string.h>
36 #include <linux/idr.h>
37 #include <linux/spinlock.h>
38 
39 static struct kmem_cache *idr_layer_cache;
40 static DEFINE_SPINLOCK(simple_ida_lock);
41 
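/*
 * Editorial usage sketch, not part of the original file: the typical
 * pre-allocate / allocate / lookup / free cycle described in the header
 * comment.  example_idr, example_lock, example_attach, example_lookup
 * and example_detach are hypothetical names, and the caller-side
 * spinlock is only one possible scheme; this file does not serialize
 * allocations or removals for you (only the free list and the
 * ida_simple_* helpers take locks internally).
 */
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_lock);

static int example_attach(void *ptr)
{
        int id, ret;

        do {
                /* may sleep with GFP_KERNEL; call without locks held */
                if (!idr_pre_get(&example_idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&example_lock);
                ret = idr_get_new(&example_idr, ptr, &id);
                spin_unlock(&example_lock);
        } while (ret == -EAGAIN);

        return ret ? ret : id;  /* new id, or -ENOSPC etc. */
}

static void *example_lookup(int id)
{
        void *ptr;

        rcu_read_lock();
        ptr = idr_find(&example_idr, id);
        rcu_read_unlock();
        return ptr;
}

static void example_detach(int id)
{
        spin_lock(&example_lock);
        idr_remove(&example_idr, id);
        spin_unlock(&example_lock);
}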
42 static struct idr_layer *get_from_free_list(struct idr *idp)
43 {
44  struct idr_layer *p;
45  unsigned long flags;
46 
47  spin_lock_irqsave(&idp->lock, flags);
48  if ((p = idp->id_free)) {
49  idp->id_free = p->ary[0];
50  idp->id_free_cnt--;
51  p->ary[0] = NULL;
52  }
53  spin_unlock_irqrestore(&idp->lock, flags);
54  return(p);
55 }
56 
57 static void idr_layer_rcu_free(struct rcu_head *head)
58 {
59  struct idr_layer *layer;
60 
61  layer = container_of(head, struct idr_layer, rcu_head);
62  kmem_cache_free(idr_layer_cache, layer);
63 }
64 
65 static inline void free_layer(struct idr_layer *p)
66 {
67  call_rcu(&p->rcu_head, idr_layer_rcu_free);
68 }
69 
70 /* only called when idp->lock is held */
71 static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
72 {
73  p->ary[0] = idp->id_free;
74  idp->id_free = p;
75  idp->id_free_cnt++;
76 }
77 
78 static void move_to_free_list(struct idr *idp, struct idr_layer *p)
79 {
80  unsigned long flags;
81 
82  /*
83  * Depends on the return element being zeroed.
84  */
85  spin_lock_irqsave(&idp->lock, flags);
86  __move_to_free_list(idp, p);
87  spin_unlock_irqrestore(&idp->lock, flags);
88 }
89 
90 static void idr_mark_full(struct idr_layer **pa, int id)
91 {
92  struct idr_layer *p = pa[0];
93  int l = 0;
94 
95  __set_bit(id & IDR_MASK, &p->bitmap);
96  /*
97  * If this layer is full mark the bit in the layer above to
98  * show that this part of the radix tree is full. This may
99  * complete the layer above and require walking up the radix
100  * tree.
101  */
102  while (p->bitmap == IDR_FULL) {
103  if (!(p = pa[++l]))
104  break;
105  id = id >> IDR_BITS;
106  __set_bit((id & IDR_MASK), &p->bitmap);
107  }
108 }
109 
123 int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
124 {
125  while (idp->id_free_cnt < MAX_IDR_FREE) {
126  struct idr_layer *new;
127  new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
128  if (new == NULL)
129  return (0);
130  move_to_free_list(idp, new);
131  }
132  return 1;
133 }
135 
136 static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
137 {
138  int n, m, sh;
139  struct idr_layer *p, *new;
140  int l, id, oid;
141  unsigned long bm;
142 
143  id = *starting_id;
144  restart:
145  p = idp->top;
146  l = idp->layers;
147  pa[l--] = NULL;
148  while (1) {
149  /*
150  * We run around this loop until we reach the leaf node...
151  */
152  n = (id >> (IDR_BITS*l)) & IDR_MASK;
153  bm = ~p->bitmap;
154  m = find_next_bit(&bm, IDR_SIZE, n);
155  if (m == IDR_SIZE) {
156  /* no space available go back to previous layer. */
157  l++;
158  oid = id;
159  id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
160 
161  /* if already at the top layer, we need to grow */
162  if (id >= 1 << (idp->layers * IDR_BITS)) {
163  *starting_id = id;
164  return IDR_NEED_TO_GROW;
165  }
166  p = pa[l];
167  BUG_ON(!p);
168 
169  /* If we need to go up one layer, continue the
170  * loop; otherwise, restart from the top.
171  */
172  sh = IDR_BITS * (l + 1);
173  if (oid >> sh == id >> sh)
174  continue;
175  else
176  goto restart;
177  }
178  if (m != n) {
179  sh = IDR_BITS*l;
180  id = ((id >> sh) ^ n ^ m) << sh;
181  }
182  if ((id >= MAX_IDR_BIT) || (id < 0))
183  return IDR_NOMORE_SPACE;
184  if (l == 0)
185  break;
186  /*
187  * Create the layer below if it is missing.
188  */
189  if (!p->ary[m]) {
190  new = get_from_free_list(idp);
191  if (!new)
192  return -1;
193  new->layer = l-1;
194  rcu_assign_pointer(p->ary[m], new);
195  p->count++;
196  }
197  pa[l--] = p;
198  p = p->ary[m];
199  }
200 
201  pa[l] = p;
202  return id;
203 }
204 
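/*
 * Editorial worked example: assuming a 32-bit build where IDR_BITS is 5
 * (IDR_SIZE = 32 slots per idr_layer), a three-layer tree decomposes
 * id 9029 (0x2345) bottom-up as
 *
 *	layer 0 index = (9029 >>  0) & 31 =  5
 *	layer 1 index = (9029 >>  5) & 31 = 26
 *	layer 2 index = (9029 >> 10) & 31 =  8
 *
 * which is exactly the "n = (id >> (IDR_BITS*l)) & IDR_MASK" step that
 * sub_alloc() above repeats on the way down to the leaf.
 */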
205 static int idr_get_empty_slot(struct idr *idp, int starting_id,
206  struct idr_layer **pa)
207 {
208  struct idr_layer *p, *new;
209  int layers, v, id;
210  unsigned long flags;
211 
212  id = starting_id;
213 build_up:
214  p = idp->top;
215  layers = idp->layers;
216  if (unlikely(!p)) {
217  if (!(p = get_from_free_list(idp)))
218  return -1;
219  p->layer = 0;
220  layers = 1;
221  }
222  /*
223  * Add a new layer to the top of the tree if the requested
224  * id is larger than the currently allocated space.
225  */
226  while ((layers < (MAX_IDR_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
227  layers++;
228  if (!p->count) {
229  /* special case: if the tree is currently empty,
230  * then we grow the tree by moving the top node
231  * upwards.
232  */
233  p->layer++;
234  continue;
235  }
236  if (!(new = get_from_free_list(idp))) {
237  /*
238  * The allocation failed. If we built part of
239  * the structure tear it down.
240  */
241  spin_lock_irqsave(&idp->lock, flags);
242  for (new = p; p && p != idp->top; new = p) {
243  p = p->ary[0];
244  new->ary[0] = NULL;
245  new->bitmap = new->count = 0;
246  __move_to_free_list(idp, new);
247  }
248  spin_unlock_irqrestore(&idp->lock, flags);
249  return -1;
250  }
251  new->ary[0] = p;
252  new->count = 1;
253  new->layer = layers-1;
254  if (p->bitmap == IDR_FULL)
255  __set_bit(0, &new->bitmap);
256  p = new;
257  }
258  rcu_assign_pointer(idp->top, p);
259  idp->layers = layers;
260  v = sub_alloc(idp, &id, pa);
261  if (v == IDR_NEED_TO_GROW)
262  goto build_up;
263  return(v);
264 }
265 
266 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
267 {
268  struct idr_layer *pa[MAX_IDR_LEVEL];
269  int id;
270 
271  id = idr_get_empty_slot(idp, starting_id, pa);
272  if (id >= 0) {
273  /*
274  * Successfully found an empty slot. Install the user
275  * pointer and mark the slot full.
276  */
277  rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
278  (struct idr_layer *)ptr);
279  pa[0]->count++;
280  idr_mark_full(pa, id);
281  }
282 
283  return id;
284 }
285 
304 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
305 {
306  int rv;
307 
308  rv = idr_get_new_above_int(idp, ptr, starting_id);
309  /*
310  * This is a cheap hack until the IDR code can be fixed to
311  * return proper error values.
312  */
313  if (rv < 0)
314  return _idr_rc_to_errno(rv);
315  *id = rv;
316  return 0;
317 }
319 
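/*
 * Editorial sketch: allocating only ids >= 1 so that 0 can keep a
 * special meaning in the caller.  example_idr_alloc_from_1 is a
 * hypothetical helper; as elsewhere, serialization against other
 * allocators/removers is the caller's job.
 */
static int example_idr_alloc_from_1(struct idr *idp, void *ptr)
{
        int id, ret;

        do {
                if (!idr_pre_get(idp, GFP_KERNEL))
                        return -ENOMEM;
                ret = idr_get_new_above(idp, ptr, 1, &id);
        } while (ret == -EAGAIN);

        return ret ? ret : id;
}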
334 int idr_get_new(struct idr *idp, void *ptr, int *id)
335 {
336  int rv;
337 
338  rv = idr_get_new_above_int(idp, ptr, 0);
339  /*
340  * This is a cheap hack until the IDR code can be fixed to
341  * return proper error values.
342  */
343  if (rv < 0)
344  return _idr_rc_to_errno(rv);
345  *id = rv;
346  return 0;
347 }
349 
350 static void idr_remove_warning(int id)
351 {
352  printk(KERN_WARNING
353  "idr_remove called for id=%d which is not allocated.\n", id);
354  dump_stack();
355 }
356 
357 static void sub_remove(struct idr *idp, int shift, int id)
358 {
359  struct idr_layer *p = idp->top;
360  struct idr_layer **pa[MAX_IDR_LEVEL];
361  struct idr_layer ***paa = &pa[0];
362  struct idr_layer *to_free;
363  int n;
364 
365  *paa = NULL;
366  *++paa = &idp->top;
367 
368  while ((shift > 0) && p) {
369  n = (id >> shift) & IDR_MASK;
370  __clear_bit(n, &p->bitmap);
371  *++paa = &p->ary[n];
372  p = p->ary[n];
373  shift -= IDR_BITS;
374  }
375  n = id & IDR_MASK;
376  if (likely(p != NULL && test_bit(n, &p->bitmap))){
377  __clear_bit(n, &p->bitmap);
378  rcu_assign_pointer(p->ary[n], NULL);
379  to_free = NULL;
380  while(*paa && ! --((**paa)->count)){
381  if (to_free)
382  free_layer(to_free);
383  to_free = **paa;
384  **paa-- = NULL;
385  }
386  if (!*paa)
387  idp->layers = 0;
388  if (to_free)
389  free_layer(to_free);
390  } else
391  idr_remove_warning(id);
392 }
393 
399 void idr_remove(struct idr *idp, int id)
400 {
401  struct idr_layer *p;
402  struct idr_layer *to_free;
403 
404  /* Mask off upper bits we don't use for the search. */
405  id &= MAX_IDR_MASK;
406 
407  sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
408  if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
409  idp->top->ary[0]) {
410  /*
411  * Single child at leftmost slot: we can shrink the tree.
412  * This level is not needed anymore since when layers are
413  * inserted, they are inserted at the top of the existing
414  * tree.
415  */
416  to_free = idp->top;
417  p = idp->top->ary[0];
418  rcu_assign_pointer(idp->top, p);
419  --idp->layers;
420  to_free->bitmap = to_free->count = 0;
421  free_layer(to_free);
422  }
423  while (idp->id_free_cnt >= MAX_IDR_FREE) {
424  p = get_from_free_list(idp);
425  /*
426  * Note: we don't call the rcu callback here, since the only
427  * layers that fall into the freelist are those that have been
428  * preallocated.
429  */
430  kmem_cache_free(idr_layer_cache, p);
431  }
432  return;
433 }
435 
449 void idr_remove_all(struct idr *idp)
450 {
451  int n, id, max;
452  int bt_mask;
453  struct idr_layer *p;
454  struct idr_layer *pa[MAX_IDR_LEVEL];
455  struct idr_layer **paa = &pa[0];
456 
457  n = idp->layers * IDR_BITS;
458  p = idp->top;
459  rcu_assign_pointer(idp->top, NULL);
460  max = 1 << n;
461 
462  id = 0;
463  while (id < max) {
464  while (n > IDR_BITS && p) {
465  n -= IDR_BITS;
466  *paa++ = p;
467  p = p->ary[(id >> n) & IDR_MASK];
468  }
469 
470  bt_mask = id;
471  id += 1 << n;
472  /* Get the highest bit that the above add changed from 0->1. */
473  while (n < fls(id ^ bt_mask)) {
474  if (p)
475  free_layer(p);
476  n += IDR_BITS;
477  p = *--paa;
478  }
479  }
480  idp->layers = 0;
481 }
483 
488 void idr_destroy(struct idr *idp)
489 {
490  while (idp->id_free_cnt) {
491  struct idr_layer *p = get_from_free_list(idp);
492  kmem_cache_free(idr_layer_cache, p);
493  }
494 }
496 
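/*
 * Editorial teardown sketch: idr_destroy() only drains the per-idr free
 * list, so live entries must be dropped first, either one by one with
 * idr_remove() or in bulk with idr_remove_all().  example_idr_cleanup
 * is a hypothetical helper name.
 */
static void example_idr_cleanup(struct idr *idp)
{
        idr_remove_all(idp);    /* drop every installed pointer */
        idr_destroy(idp);       /* then free the cached idr_layers */
}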
509 void *idr_find(struct idr *idp, int id)
510 {
511  int n;
512  struct idr_layer *p;
513 
514  p = rcu_dereference_raw(idp->top);
515  if (!p)
516  return NULL;
517  n = (p->layer+1) * IDR_BITS;
518 
519  /* Mask off upper bits we don't use for the search. */
520  id &= MAX_IDR_MASK;
521 
522  if (id >= (1 << n))
523  return NULL;
524  BUG_ON(n == 0);
525 
526  while (n > 0 && p) {
527  n -= IDR_BITS;
528  BUG_ON(n != p->layer*IDR_BITS);
529  p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
530  }
531  return((void *)p);
532 }
534 
553 int idr_for_each(struct idr *idp,
554  int (*fn)(int id, void *p, void *data), void *data)
555 {
556  int n, id, max, error = 0;
557  struct idr_layer *p;
558  struct idr_layer *pa[MAX_IDR_LEVEL];
559  struct idr_layer **paa = &pa[0];
560 
561  n = idp->layers * IDR_BITS;
562  p = rcu_dereference_raw(idp->top);
563  max = 1 << n;
564 
565  id = 0;
566  while (id < max) {
567  while (n > 0 && p) {
568  n -= IDR_BITS;
569  *paa++ = p;
570  p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
571  }
572 
573  if (p) {
574  error = fn(id, (void *)p, data);
575  if (error)
576  break;
577  }
578 
579  id += 1 << n;
580  while (n < fls(id)) {
581  n += IDR_BITS;
582  p = *--paa;
583  }
584  }
585 
586  return error;
587 }
589 
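/*
 * Editorial iteration sketch: a callback of the shape idr_for_each()
 * expects.  example_print_entry and example_dump are hypothetical
 * names; returning non-zero from the callback stops the walk and that
 * value is handed back to the caller.
 */
static int example_print_entry(int id, void *p, void *data)
{
        printk(KERN_INFO "id %d -> %p\n", id, p);
        return 0;               /* keep going */
}

static int example_dump(struct idr *idp)
{
        /* the caller must fence off concurrent removals, e.g. with the
         * same lock it uses around idr_get_new()/idr_remove() */
        return idr_for_each(idp, example_print_entry, NULL);
}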
602 void *idr_get_next(struct idr *idp, int *nextidp)
603 {
604  struct idr_layer *p, *pa[MAX_IDR_LEVEL];
605  struct idr_layer **paa = &pa[0];
606  int id = *nextidp;
607  int n, max;
608 
609  /* find first ent */
610  p = rcu_dereference_raw(idp->top);
611  if (!p)
612  return NULL;
613  n = (p->layer + 1) * IDR_BITS;
614  max = 1 << n;
615 
616  while (id < max) {
617  while (n > 0 && p) {
618  n -= IDR_BITS;
619  *paa++ = p;
620  p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
621  }
622 
623  if (p) {
624  *nextidp = id;
625  return p;
626  }
627 
628  id += 1 << n;
629  while (n < fls(id)) {
630  n += IDR_BITS;
631  p = *--paa;
632  }
633  }
634  return NULL;
635 }
637 
638 
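/*
 * Editorial sketch: walking live entries with idr_get_next() instead of
 * a callback.  example_visit_all is a hypothetical name; note that the
 * cursor must be advanced past the id just returned.
 */
static void example_visit_all(struct idr *idp)
{
        void *entry;
        int id = 0;

        while ((entry = idr_get_next(idp, &id)) != NULL) {
                printk(KERN_INFO "id %d -> %p\n", id, entry);
                id++;           /* move past the entry just returned */
        }
}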
651 void *idr_replace(struct idr *idp, void *ptr, int id)
652 {
653  int n;
654  struct idr_layer *p, *old_p;
655 
656  p = idp->top;
657  if (!p)
658  return ERR_PTR(-EINVAL);
659 
660  n = (p->layer+1) * IDR_BITS;
661 
662  id &= MAX_IDR_MASK;
663 
664  if (id >= (1 << n))
665  return ERR_PTR(-EINVAL);
666 
667  n -= IDR_BITS;
668  while ((n > 0) && p) {
669  p = p->ary[(id >> n) & IDR_MASK];
670  n -= IDR_BITS;
671  }
672 
673  n = id & IDR_MASK;
674  if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
675  return ERR_PTR(-ENOENT);
676 
677  old_p = p->ary[n];
678  rcu_assign_pointer(p->ary[n], ptr);
679 
680  return old_p;
681 }
683 
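/*
 * Editorial sketch: swapping the pointer behind an already-allocated
 * id.  idr_replace() above returns the old pointer or an ERR_PTR(), so
 * the result must be checked with IS_ERR().  example_swap is a
 * hypothetical name.
 */
static int example_swap(struct idr *idp, int id, void *new_ptr)
{
        void *old;

        old = idr_replace(idp, new_ptr, id);
        if (IS_ERR(old))
                return PTR_ERR(old);    /* -EINVAL or -ENOENT */

        /* RCU readers may still see the old pointer; the caller is
         * responsible for deferring its destruction appropriately. */
        return 0;
}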
684 void __init idr_init_cache(void)
685 {
686  idr_layer_cache = kmem_cache_create("idr_layer_cache",
687  sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
688 }
689 
697 void idr_init(struct idr *idp)
698 {
699  memset(idp, 0, sizeof(struct idr));
700  spin_lock_init(&idp->lock);
701 }
703 
704 
717 static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
718 {
719  unsigned long flags;
720 
721  if (!ida->free_bitmap) {
722  spin_lock_irqsave(&ida->idr.lock, flags);
723  if (!ida->free_bitmap) {
724  ida->free_bitmap = bitmap;
725  bitmap = NULL;
726  }
727  spin_unlock_irqrestore(&ida->idr.lock, flags);
728  }
729 
730  kfree(bitmap);
731 }
732 
745 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
746 {
747  /* allocate idr_layers */
748  if (!idr_pre_get(&ida->idr, gfp_mask))
749  return 0;
750 
751  /* allocate free_bitmap */
752  if (!ida->free_bitmap) {
753  struct ida_bitmap *bitmap;
754 
755  bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
756  if (!bitmap)
757  return 0;
758 
759  free_bitmap(ida, bitmap);
760  }
761 
762  return 1;
763 }
765 
781 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
782 {
783  struct idr_layer *pa[MAX_IDR_LEVEL];
784  struct ida_bitmap *bitmap;
785  unsigned long flags;
786  int idr_id = starting_id / IDA_BITMAP_BITS;
787  int offset = starting_id % IDA_BITMAP_BITS;
788  int t, id;
789 
790  restart:
791  /* get vacant slot */
792  t = idr_get_empty_slot(&ida->idr, idr_id, pa);
793  if (t < 0)
794  return _idr_rc_to_errno(t);
795 
796  if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
797  return -ENOSPC;
798 
799  if (t != idr_id)
800  offset = 0;
801  idr_id = t;
802 
803  /* if bitmap isn't there, create a new one */
804  bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
805  if (!bitmap) {
806  spin_lock_irqsave(&ida->idr.lock, flags);
807  bitmap = ida->free_bitmap;
808  ida->free_bitmap = NULL;
809  spin_unlock_irqrestore(&ida->idr.lock, flags);
810 
811  if (!bitmap)
812  return -EAGAIN;
813 
814  memset(bitmap, 0, sizeof(struct ida_bitmap));
815  rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
816  (void *)bitmap);
817  pa[0]->count++;
818  }
819 
820  /* lookup for empty slot */
821  t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
822  if (t == IDA_BITMAP_BITS) {
823  /* no empty slot after offset, continue to the next chunk */
824  idr_id++;
825  offset = 0;
826  goto restart;
827  }
828 
829  id = idr_id * IDA_BITMAP_BITS + t;
830  if (id >= MAX_IDR_BIT)
831  return -ENOSPC;
832 
833  __set_bit(t, bitmap->bitmap);
834  if (++bitmap->nr_busy == IDA_BITMAP_BITS)
835  idr_mark_full(pa, idr_id);
836 
837  *p_id = id;
838 
839  /* Each leaf node can handle nearly a thousand slots and the
840  * whole idea of ida is to have a small memory footprint.
841  * Throw away extra resources one by one after each successful
842  * allocation.
843  */
844  if (ida->idr.id_free_cnt || ida->free_bitmap) {
845  struct idr_layer *p = get_from_free_list(&ida->idr);
846  if (p)
847  kmem_cache_free(idr_layer_cache, p);
848  }
849 
850  return 0;
851 }
853 
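/*
 * Editorial sketch: the ida analogue of the idr cycle, pairing
 * ida_pre_get() with ida_get_new_above() under a caller-side lock.
 * example_ida, example_ida_lock and example_ida_alloc are hypothetical
 * names; the ida_simple_get()/ida_simple_remove() helpers further down
 * wrap essentially this pattern.
 */
static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_ida_lock);

static int example_ida_alloc(void)
{
        int id, ret;

        do {
                if (!ida_pre_get(&example_ida, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&example_ida_lock);
                ret = ida_get_new_above(&example_ida, 0, &id);
                spin_unlock(&example_ida_lock);
        } while (ret == -EAGAIN);

        return ret ? ret : id;
}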
867 int ida_get_new(struct ida *ida, int *p_id)
868 {
869  return ida_get_new_above(ida, 0, p_id);
870 }
872 
878 void ida_remove(struct ida *ida, int id)
879 {
880  struct idr_layer *p = ida->idr.top;
881  int shift = (ida->idr.layers - 1) * IDR_BITS;
882  int idr_id = id / IDA_BITMAP_BITS;
883  int offset = id % IDA_BITMAP_BITS;
884  int n;
885  struct ida_bitmap *bitmap;
886 
887  /* clear full bits while looking up the leaf idr_layer */
888  while ((shift > 0) && p) {
889  n = (idr_id >> shift) & IDR_MASK;
890  __clear_bit(n, &p->bitmap);
891  p = p->ary[n];
892  shift -= IDR_BITS;
893  }
894 
895  if (p == NULL)
896  goto err;
897 
898  n = idr_id & IDR_MASK;
899  __clear_bit(n, &p->bitmap);
900 
901  bitmap = (void *)p->ary[n];
902  if (!test_bit(offset, bitmap->bitmap))
903  goto err;
904 
905  /* update bitmap and remove it if empty */
906  __clear_bit(offset, bitmap->bitmap);
907  if (--bitmap->nr_busy == 0) {
908  __set_bit(n, &p->bitmap); /* to please idr_remove() */
909  idr_remove(&ida->idr, idr_id);
910  free_bitmap(ida, bitmap);
911  }
912 
913  return;
914 
915  err:
916  printk(KERN_WARNING
917  "ida_remove called for id=%d which is not allocated.\n", id);
918 }
920 
925 void ida_destroy(struct ida *ida)
926 {
927  idr_destroy(&ida->idr);
928  kfree(ida->free_bitmap);
929 }
931 
944 int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
945  gfp_t gfp_mask)
946 {
947  int ret, id;
948  unsigned int max;
949  unsigned long flags;
950 
951  BUG_ON((int)start < 0);
952  BUG_ON((int)end < 0);
953 
954  if (end == 0)
955  max = 0x80000000;
956  else {
957  BUG_ON(end < start);
958  max = end - 1;
959  }
960 
961 again:
962  if (!ida_pre_get(ida, gfp_mask))
963  return -ENOMEM;
964 
965  spin_lock_irqsave(&simple_ida_lock, flags);
966  ret = ida_get_new_above(ida, start, &id);
967  if (!ret) {
968  if (id > max) {
969  ida_remove(ida, id);
970  ret = -ENOSPC;
971  } else {
972  ret = id;
973  }
974  }
975  spin_unlock_irqrestore(&simple_ida_lock, flags);
976 
977  if (unlikely(ret == -EAGAIN))
978  goto again;
979 
980  return ret;
981 }
983 
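/*
 * Editorial sketch: the "simple" ida interface does the preallocation
 * retry and the locking internally, so a bare get/put pair suffices.
 * example_minor_ida, example_get_minor and example_put_minor are
 * hypothetical names; 0 and 256 are arbitrary range bounds (the end
 * bound is exclusive).
 */
static DEFINE_IDA(example_minor_ida);

static int example_get_minor(void)
{
        /* returns an id in [0, 256), or a negative errno */
        return ida_simple_get(&example_minor_ida, 0, 256, GFP_KERNEL);
}

static void example_put_minor(unsigned int minor)
{
        ida_simple_remove(&example_minor_ida, minor);
}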
989 void ida_simple_remove(struct ida *ida, unsigned int id)
990 {
991  unsigned long flags;
992 
993  BUG_ON((int)id < 0);
994  spin_lock_irqsave(&simple_ida_lock, flags);
995  ida_remove(ida, id);
996  spin_unlock_irqrestore(&simple_ida_lock, flags);
997 }
999 
1007 void ida_init(struct ida *ida)
1008 {
1009  memset(ida, 0, sizeof(struct ida));
1010  idr_init(&ida->idr);
1011 
1012 }