Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
genalloc.c
Go to the documentation of this file.
1 /*
2  * Basic general purpose allocator for managing special purpose
3  * memory, for example, memory that is not managed by the regular
4  * kmalloc/kfree interface. Uses for this includes on-device special
5  * memory, uncached memory etc.
6  *
7  * It is safe to use the allocator in NMI handlers and other special
8  * unblockable contexts that could otherwise deadlock on locks. This
9  * is implemented by using atomic operations and retries on any
10  * conflicts. The disadvantage is that there may be livelocks in
11  * extreme cases. For better scalability, one allocator can be used
12  * for each CPU.
13  *
14  * The lockless operation only works if there is enough memory
15  * available. If new memory is added to the pool a lock has to be
16  * still taken. So any user relying on locklessness has to ensure
17  * that sufficient memory is preallocated.
18  *
19  * The basic atomic operation of this allocator is cmpxchg on long.
20  * On architectures that don't have NMI-safe cmpxchg implementation,
21  * the allocator can NOT be used in NMI handlers. So code using the
22  * allocator in an NMI handler should depend on
23  * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
24  *
25  * Copyright 2005 (C) Jes Sorensen <[email protected]>
26  *
27  * This source code is licensed under the GNU General Public License,
28  * Version 2. See the file COPYING for more details.
29  */
30 
31 #include <linux/slab.h>
32 #include <linux/export.h>
33 #include <linux/bitmap.h>
34 #include <linux/rculist.h>
35 #include <linux/interrupt.h>
36 #include <linux/genalloc.h>
37 
38 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
39 {
40  unsigned long val, nval;
41 
42  nval = *addr;
43  do {
44  val = nval;
45  if (val & mask_to_set)
46  return -EBUSY;
47  cpu_relax();
48  } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
49 
50  return 0;
51 }
52 
53 static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
54 {
55  unsigned long val, nval;
56 
57  nval = *addr;
58  do {
59  val = nval;
60  if ((val & mask_to_clear) != mask_to_clear)
61  return -EBUSY;
62  cpu_relax();
63  } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
64 
65  return 0;
66 }
67 
68 /*
69  * bitmap_set_ll - set the specified number of bits at the specified position
70  * @map: pointer to a bitmap
71  * @start: a bit position in @map
72  * @nr: number of bits to set
73  *
74  * Set @nr bits start from @start in @map lock-lessly. Several users
75  * can set/clear the same bitmap simultaneously without lock. If two
76  * users set the same bit, one user will return remain bits, otherwise
77  * return 0.
78  */
79 static int bitmap_set_ll(unsigned long *map, int start, int nr)
80 {
81  unsigned long *p = map + BIT_WORD(start);
82  const int size = start + nr;
83  int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
84  unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
85 
86  while (nr - bits_to_set >= 0) {
87  if (set_bits_ll(p, mask_to_set))
88  return nr;
89  nr -= bits_to_set;
90  bits_to_set = BITS_PER_LONG;
91  mask_to_set = ~0UL;
92  p++;
93  }
94  if (nr) {
95  mask_to_set &= BITMAP_LAST_WORD_MASK(size);
96  if (set_bits_ll(p, mask_to_set))
97  return nr;
98  }
99 
100  return 0;
101 }
102 
103 /*
104  * bitmap_clear_ll - clear the specified number of bits at the specified position
105  * @map: pointer to a bitmap
106  * @start: a bit position in @map
107  * @nr: number of bits to set
108  *
109  * Clear @nr bits start from @start in @map lock-lessly. Several users
110  * can set/clear the same bitmap simultaneously without lock. If two
111  * users clear the same bit, one user will return remain bits,
112  * otherwise return 0.
113  */
114 static int bitmap_clear_ll(unsigned long *map, int start, int nr)
115 {
116  unsigned long *p = map + BIT_WORD(start);
117  const int size = start + nr;
118  int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
119  unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
120 
121  while (nr - bits_to_clear >= 0) {
122  if (clear_bits_ll(p, mask_to_clear))
123  return nr;
124  nr -= bits_to_clear;
125  bits_to_clear = BITS_PER_LONG;
126  mask_to_clear = ~0UL;
127  p++;
128  }
129  if (nr) {
130  mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
131  if (clear_bits_ll(p, mask_to_clear))
132  return nr;
133  }
134 
135  return 0;
136 }
137 
/*
 * gen_pool_create body.
 * NOTE(review): the kernel-doc and signature lines (source lines 138-146)
 * are absent from this extraction; upstream the signature is presumably
 * "struct gen_pool *gen_pool_create(int min_alloc_order, int nid)" -- confirm
 * against lib/genalloc.c.
 * NOTE(review): source line 154 is also missing between 153 and 155 below --
 * presumably "pool->min_alloc_order = min_alloc_order;", dropped by the
 * extraction; otherwise the pool's order would be left uninitialized. Confirm.
 */
147 {
148  struct gen_pool *pool;
 /* allocate the pool header on node @nid; may return NULL on OOM */
150  pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
151  if (pool != NULL) {
152  spin_lock_init(&pool->lock);
153  INIT_LIST_HEAD(&pool->chunks);
 /* default search policy; replaceable later via gen_pool_set_algo() */
155  pool->algo = gen_pool_first_fit;
156  pool->data = NULL;
157  }
158  return pool;
159 }
161 
175 int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
176  size_t size, int nid)
177 {
178  struct gen_pool_chunk *chunk;
179  int nbits = size >> pool->min_alloc_order;
180  int nbytes = sizeof(struct gen_pool_chunk) +
181  BITS_TO_LONGS(nbits) * sizeof(long);
182 
183  chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
184  if (unlikely(chunk == NULL))
185  return -ENOMEM;
186 
187  chunk->phys_addr = phys;
188  chunk->start_addr = virt;
189  chunk->end_addr = virt + size;
190  atomic_set(&chunk->avail, size);
191 
192  spin_lock(&pool->lock);
193  list_add_rcu(&chunk->next_chunk, &pool->chunks);
194  spin_unlock(&pool->lock);
195 
196  return 0;
197 }
199 
207 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
208 {
209  struct gen_pool_chunk *chunk;
210  phys_addr_t paddr = -1;
211 
212  rcu_read_lock();
213  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
214  if (addr >= chunk->start_addr && addr < chunk->end_addr) {
215  paddr = chunk->phys_addr + (addr - chunk->start_addr);
216  break;
217  }
218  }
219  rcu_read_unlock();
220 
221  return paddr;
222 }
224 
/*
 * gen_pool_destroy body.
 * NOTE(review): the kernel-doc and signature lines (source lines 225-232)
 * are absent from this extraction; upstream the signature is presumably
 * "void gen_pool_destroy(struct gen_pool *pool)" -- confirm against
 * lib/genalloc.c.
 */
233 {
234  struct list_head *_chunk, *_next_chunk;
235  struct gen_pool_chunk *chunk;
236  int order = pool->min_alloc_order;
237  int bit, end_bit;
 /* _safe variant: each chunk is unlinked and freed while iterating */
239  list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
240  chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
241  list_del(&chunk->next_chunk);
 /* any set bit is an outstanding allocation: destroying now would be a bug */
243  end_bit = (chunk->end_addr - chunk->start_addr) >> order;
244  bit = find_next_bit(chunk->bits, end_bit, 0);
245  BUG_ON(bit < end_bit);
246 
247  kfree(chunk);
248  }
249  kfree(pool);
250  return;
251 }
253 
264 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
265 {
266  struct gen_pool_chunk *chunk;
267  unsigned long addr = 0;
268  int order = pool->min_alloc_order;
269  int nbits, start_bit = 0, end_bit, remain;
270 
271 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
272  BUG_ON(in_nmi());
273 #endif
274 
275  if (size == 0)
276  return 0;
277 
278  nbits = (size + (1UL << order) - 1) >> order;
279  rcu_read_lock();
280  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
281  if (size > atomic_read(&chunk->avail))
282  continue;
283 
284  end_bit = (chunk->end_addr - chunk->start_addr) >> order;
285 retry:
286  start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
287  pool->data);
288  if (start_bit >= end_bit)
289  continue;
290  remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
291  if (remain) {
292  remain = bitmap_clear_ll(chunk->bits, start_bit,
293  nbits - remain);
294  BUG_ON(remain);
295  goto retry;
296  }
297 
298  addr = chunk->start_addr + ((unsigned long)start_bit << order);
299  size = nbits << order;
300  atomic_sub(size, &chunk->avail);
301  break;
302  }
303  rcu_read_unlock();
304  return addr;
305 }
307 
318 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
319 {
320  struct gen_pool_chunk *chunk;
321  int order = pool->min_alloc_order;
322  int start_bit, nbits, remain;
323 
324 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
325  BUG_ON(in_nmi());
326 #endif
327 
328  nbits = (size + (1UL << order) - 1) >> order;
329  rcu_read_lock();
330  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
331  if (addr >= chunk->start_addr && addr < chunk->end_addr) {
332  BUG_ON(addr + size > chunk->end_addr);
333  start_bit = (addr - chunk->start_addr) >> order;
334  remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
335  BUG_ON(remain);
336  size = nbits << order;
337  atomic_add(size, &chunk->avail);
338  rcu_read_unlock();
339  return;
340  }
341  }
342  rcu_read_unlock();
343  BUG();
344 }
346 
/*
 * gen_pool_for_each_chunk body.
 * NOTE(review): the kernel-doc and the first signature line (source lines
 * 347-356) are absent from this extraction; upstream the opening line is
 * presumably "void gen_pool_for_each_chunk(struct gen_pool *pool," --
 * confirm against lib/genalloc.c.
 */
357  void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
358  void *data)
359 {
360  struct gen_pool_chunk *chunk;
 /* chunk list walked under rcu_read_lock(); @func is called inside the
    read-side critical section, so it must not sleep */
362  rcu_read_lock();
363  list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
364  func(pool, chunk, data);
365  rcu_read_unlock();
366 }
368 
375 size_t gen_pool_avail(struct gen_pool *pool)
376 {
377  struct gen_pool_chunk *chunk;
378  size_t avail = 0;
379 
380  rcu_read_lock();
381  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
382  avail += atomic_read(&chunk->avail);
383  rcu_read_unlock();
384  return avail;
385 }
387 
394 size_t gen_pool_size(struct gen_pool *pool)
395 {
396  struct gen_pool_chunk *chunk;
397  size_t size = 0;
398 
399  rcu_read_lock();
400  list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
401  size += chunk->end_addr - chunk->start_addr;
402  rcu_read_unlock();
403  return size;
404 }
406 
417 void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
418 {
419  rcu_read_lock();
420 
421  pool->algo = algo;
422  if (!pool->algo)
423  pool->algo = gen_pool_first_fit;
424 
425  pool->data = data;
426 
427  rcu_read_unlock();
428 }
430 
/**
 * gen_pool_first_fit - default search: lowest free run of @nr bits
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @data: additional data - unused by this algorithm
 *
 * Returns the first position >= @start holding @nr clear bits, or a
 * value >= @size if no such run exists.
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long pos;

	pos = bitmap_find_next_zero_area(map, size, start, nr, 0);
	return pos;
}
446 
/**
 * gen_pool_best_fit - search for the smallest free run that fits @nr bits
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @data: additional data - unused by this algorithm
 *
 * Walks every candidate zero-area and keeps the shortest one; stops early
 * on an exact fit.  Returns the chosen position, or a value >= @size if
 * no run of @nr clear bits exists.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long best_start = size;	/* "not found" sentinel */
	unsigned long best_len = size + 1;	/* larger than any real run */
	unsigned long pos;

	pos = bitmap_find_next_zero_area(map, size, start, nr, 0);
	while (pos < size) {
		/* the free run ends at the next set bit after pos + nr */
		int limit = find_next_bit(map, size, pos + nr);

		if ((unsigned long)(limit - pos) < best_len) {
			best_len = limit - pos;
			best_start = pos;
			/* exact fit cannot be beaten */
			if (best_len == nr)
				break;
		}
		pos = bitmap_find_next_zero_area(map, size,
						 limit + 1, nr, 0);
	}

	return best_start;
}