Linux Kernel 3.7.1
alloc.c
1 /*
2  * alloc.c - NILFS dat/inode allocator
3  *
4  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19  *
20  * Original code was written by Koji Sato <[email protected]>.
21  * Two allocators were unified by Ryusuke Konishi <[email protected]>,
22  * Amagai Yoshiji <[email protected]>.
23  */
24 
25 #include <linux/types.h>
26 #include <linux/buffer_head.h>
27 #include <linux/fs.h>
28 #include <linux/bitops.h>
29 #include <linux/slab.h>
30 #include "mdt.h"
31 #include "alloc.h"
32 
33 
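34 /**
35  * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
36  *                                      descriptor block can maintain
37  * @inode: inode of metadata file using this allocator
38  */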
39 static inline unsigned long
40 nilfs_palloc_groups_per_desc_block(const struct inode *inode)
41 {
42  return (1UL << inode->i_blkbits) /
43  sizeof(struct nilfs_palloc_group_desc);
44 }
45 
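46 /**
47  * nilfs_palloc_groups_count - get maximum number of groups
48  * @inode: inode of metadata file using this allocator
49  */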
50 static inline unsigned long
51 nilfs_palloc_groups_count(const struct inode *inode)
52 {
53  return 1UL << (BITS_PER_LONG - (inode->i_blkbits + 3 /* log2(8) */));
54 }
55 
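56 /**
57  * nilfs_palloc_init_blockgroup - initialize private variables for allocator
58  * @inode: inode of metadata file using this allocator
59  * @entry_size: size of the persistent object
60  */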
61 int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size)
62 {
63  struct nilfs_mdt_info *mi = NILFS_MDT(inode);
64 
65  mi->mi_bgl = kmalloc(sizeof(*mi->mi_bgl), GFP_NOFS);
66  if (!mi->mi_bgl)
67  return -ENOMEM;
68 
69  bgl_lock_init(mi->mi_bgl);
70 
71  nilfs_mdt_set_entry_size(inode, entry_size, 0);
72 
73  mi->mi_blocks_per_group =
74  DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode),
75  mi->mi_entries_per_block) + 1;
76  /* Number of blocks in a group including entry blocks and
77  a bitmap block */
78  mi->mi_blocks_per_desc_block =
79  nilfs_palloc_groups_per_desc_block(inode) *
80  mi->mi_blocks_per_group + 1;
81  /* Number of blocks per descriptor including the
82  descriptor block */
83  return 0;
84 }
85 
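86 /**
87  * nilfs_palloc_group - get group number and offset from an entry number
88  * @inode: inode of metadata file using this allocator
89  * @nr: serial number of the entry (e.g. inode number)
90  * @offset: pointer to store offset number in the group
91  */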
92 static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
93  unsigned long *offset)
94 {
95  __u64 group = nr;
96 
97  *offset = do_div(group, nilfs_palloc_entries_per_group(inode));
98  return group;
99 }
100 
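101 /**
102  * nilfs_palloc_desc_blkoff - get block offset of a group descriptor block
103  * @inode: inode of metadata file using this allocator
104  * @group: group number
105  *
106  * nilfs_palloc_desc_blkoff() returns the block offset of the descriptor
107  * block which contains a descriptor of the specified group.
108  */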
109 static unsigned long
110 nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
111 {
112  unsigned long desc_block =
113  group / nilfs_palloc_groups_per_desc_block(inode);
114  return desc_block * NILFS_MDT(inode)->mi_blocks_per_desc_block;
115 }
116 
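117 /**
118  * nilfs_palloc_bitmap_blkoff - get block offset of a bitmap block
119  * @inode: inode of metadata file using this allocator
120  * @group: group number
121  *
122  * nilfs_palloc_bitmap_blkoff() returns the block offset of the bitmap
123  * block used to allocate/deallocate entries in the specified group.
124  */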
125 static unsigned long
126 nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
127 {
128  unsigned long desc_offset =
129  group % nilfs_palloc_groups_per_desc_block(inode);
130  return nilfs_palloc_desc_blkoff(inode, group) + 1 +
131  desc_offset * NILFS_MDT(inode)->mi_blocks_per_group;
132 }
133 
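134 /**
135  * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
136  * @inode: inode of metadata file using this allocator
137  * @group: group number
138  * @desc: pointer to descriptor structure for the group
139  */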
140 static unsigned long
141 nilfs_palloc_group_desc_nfrees(struct inode *inode, unsigned long group,
142  const struct nilfs_palloc_group_desc *desc)
143 {
144  unsigned long nfree;
145 
146  spin_lock(nilfs_mdt_bgl_lock(inode, group));
147  nfree = le32_to_cpu(desc->pg_nfrees);
148  spin_unlock(nilfs_mdt_bgl_lock(inode, group));
149  return nfree;
150 }
151 
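152 /**
153  * nilfs_palloc_group_desc_add_entries - adjust the number of free entries
154  * @inode: inode of metadata file using this allocator
155  * @group: group number
156  * @desc: pointer to descriptor structure for the group
157  * @n: delta to be added
158  */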
159 static void
160 nilfs_palloc_group_desc_add_entries(struct inode *inode,
161  unsigned long group,
162  struct nilfs_palloc_group_desc *desc,
163  u32 n)
164 {
165  spin_lock(nilfs_mdt_bgl_lock(inode, group));
166  le32_add_cpu(&desc->pg_nfrees, n);
167  spin_unlock(nilfs_mdt_bgl_lock(inode, group));
168 }
169 
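170 /**
171  * nilfs_palloc_entry_blkoff - get block offset of an entry block
172  * @inode: inode of metadata file using this allocator
173  * @nr: serial number of the entry (e.g. inode number)
174  */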
175 static unsigned long
176 nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)
177 {
178  unsigned long group, group_offset;
179 
180  group = nilfs_palloc_group(inode, nr, &group_offset);
181 
182  return nilfs_palloc_bitmap_blkoff(inode, group) + 1 +
183  group_offset / NILFS_MDT(inode)->mi_entries_per_block;
184 }
185 
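186 /**
187  * nilfs_palloc_desc_block_init - initialize buffer of a group descriptor block
188  * @inode: inode of metadata file
189  * @bh: buffer head of the buffer to be initialized
190  * @kaddr: kernel address mapped for the page including the buffer
191  */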
192 static void nilfs_palloc_desc_block_init(struct inode *inode,
193  struct buffer_head *bh, void *kaddr)
194 {
195  struct nilfs_palloc_group_desc *desc = kaddr + bh_offset(bh);
196  unsigned long n = nilfs_palloc_groups_per_desc_block(inode);
197  __le32 nfrees;
198 
199  nfrees = cpu_to_le32(nilfs_palloc_entries_per_group(inode));
200  while (n-- > 0) {
201  desc->pg_nfrees = nfrees;
202  desc++;
203  }
204 }
205 
206 static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
207  int create,
208  void (*init_block)(struct inode *,
209  struct buffer_head *,
210  void *),
211  struct buffer_head **bhp,
212  struct nilfs_bh_assoc *prev,
213  spinlock_t *lock)
214 {
215  int ret;
216 
217  spin_lock(lock);
218  if (prev->bh && blkoff == prev->blkoff) {
219  get_bh(prev->bh);
220  *bhp = prev->bh;
221  spin_unlock(lock);
222  return 0;
223  }
224  spin_unlock(lock);
225 
226  ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
227  if (!ret) {
228  spin_lock(lock);
229  /*
230  * The following code must be safe for change of the
231  * cache contents during the get block call.
232  */
233  brelse(prev->bh);
234  get_bh(*bhp);
235  prev->bh = *bhp;
236  prev->blkoff = blkoff;
237  spin_unlock(lock);
238  }
239  return ret;
240 }
241 
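242 /**
243  * nilfs_palloc_get_desc_block - get buffer head of a group descriptor block
244  * @inode: inode of metadata file using this allocator
245  * @group: group number
246  * @create: create flag
247  * @bhp: pointer to store the resultant buffer head
248  */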
249 static int nilfs_palloc_get_desc_block(struct inode *inode,
250  unsigned long group,
251  int create, struct buffer_head **bhp)
252 {
253  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
254 
255  return nilfs_palloc_get_block(inode,
256  nilfs_palloc_desc_blkoff(inode, group),
257  create, nilfs_palloc_desc_block_init,
258  bhp, &cache->prev_desc, &cache->lock);
259 }
260 
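261 /**
262  * nilfs_palloc_get_bitmap_block - get buffer head of a bitmap block
263  * @inode: inode of metadata file using this allocator
264  * @group: group number
265  * @create: create flag
266  * @bhp: pointer to store the resultant buffer head
267  */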
268 static int nilfs_palloc_get_bitmap_block(struct inode *inode,
269  unsigned long group,
270  int create, struct buffer_head **bhp)
271 {
272  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
273 
274  return nilfs_palloc_get_block(inode,
275  nilfs_palloc_bitmap_blkoff(inode, group),
276  create, NULL, bhp,
277  &cache->prev_bitmap, &cache->lock);
278 }
279 
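280 /**
281  * nilfs_palloc_get_entry_block - get buffer head of an entry block
282  * @inode: inode of metadata file using this allocator
283  * @nr: serial number of the entry (e.g. inode number)
284  * @create: create flag
285  * @bhp: pointer to store the resultant buffer head
286  */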
287 int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
288  int create, struct buffer_head **bhp)
289 {
290  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
291 
292  return nilfs_palloc_get_block(inode,
293  nilfs_palloc_entry_blkoff(inode, nr),
294  create, NULL, bhp,
295  &cache->prev_entry, &cache->lock);
296 }
297 
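298 /**
299  * nilfs_palloc_block_get_group_desc - get kernel address of a group descriptor
300  * @inode: inode of metadata file using this allocator
301  * @group: group number
302  * @bh: buffer head of the buffer storing the group descriptor block
303  * @kaddr: kernel address mapped for the page including the buffer
304  */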
305 static struct nilfs_palloc_group_desc *
306 nilfs_palloc_block_get_group_desc(const struct inode *inode,
307  unsigned long group,
308  const struct buffer_head *bh, void *kaddr)
309 {
310  return (struct nilfs_palloc_group_desc *)(kaddr + bh_offset(bh)) +
311  group % nilfs_palloc_groups_per_desc_block(inode);
312 }
313 
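314 /**
315  * nilfs_palloc_block_get_entry - get kernel address of an entry
316  * @inode: inode of metadata file using this allocator
317  * @nr: serial number of the entry (e.g. inode number)
318  * @bh: buffer head of the buffer storing the entry block
319  * @kaddr: kernel address mapped for the page including the buffer
320  */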
321 void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
322  const struct buffer_head *bh, void *kaddr)
323 {
324  unsigned long entry_offset, group_offset;
325 
326  nilfs_palloc_group(inode, nr, &group_offset);
327  entry_offset = group_offset % NILFS_MDT(inode)->mi_entries_per_block;
328 
329  return kaddr + bh_offset(bh) +
330  entry_offset * NILFS_MDT(inode)->mi_entry_size;
331 }
332 
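333 /**
334  * nilfs_palloc_find_available_slot - find available slot in a group
335  * @inode: inode of metadata file using this allocator
336  * @group: group number
337  * @target: offset number of an entry in the group (start point)
338  * @bitmap: bitmap of the group
339  * @bsize: size in bits
340  */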
341 static int nilfs_palloc_find_available_slot(struct inode *inode,
342  unsigned long group,
343  unsigned long target,
344  unsigned char *bitmap,
345  int bsize)
346 {
347  int curr, pos, end, i;
348 
349  if (target > 0) {
350  end = (target + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1);
351  if (end > bsize)
352  end = bsize;
353  pos = nilfs_find_next_zero_bit(bitmap, end, target);
354  if (pos < end &&
355  !nilfs_set_bit_atomic(
356  nilfs_mdt_bgl_lock(inode, group), pos, bitmap))
357  return pos;
358  } else
359  end = 0;
360 
361  for (i = 0, curr = end;
362  i < bsize;
363  i += BITS_PER_LONG, curr += BITS_PER_LONG) {
364  /* wrap around */
365  if (curr >= bsize)
366  curr = 0;
367  while (*((unsigned long *)bitmap + curr / BITS_PER_LONG)
368  != ~0UL) {
369  end = curr + BITS_PER_LONG;
370  if (end > bsize)
371  end = bsize;
372  pos = nilfs_find_next_zero_bit(bitmap, end, curr);
373  if ((pos < end) &&
374  !nilfs_set_bit_atomic(
375  nilfs_mdt_bgl_lock(inode, group), pos,
376  bitmap))
377  return pos;
378  }
379  }
380  return -ENOSPC;
381 }
382 
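383 /**
384  * nilfs_palloc_rest_groups_in_desc_block - get the remaining number of groups
385  *                                          in a group descriptor block
386  * @inode: inode of metadata file using this allocator
387  * @curr: current group number
388  * @max: maximum number of groups
389  */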
390 static unsigned long
391 nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
392  unsigned long curr, unsigned long max)
393 {
394  return min_t(unsigned long,
395  nilfs_palloc_groups_per_desc_block(inode) -
396  curr % nilfs_palloc_groups_per_desc_block(inode),
397  max - curr + 1);
398 }
399 
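400 /**
401  * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
402  * @inode: inode of metadata file using this allocator
403  * @req: nilfs_palloc_req structure exchanged for the allocation
404  */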
405 int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
406  struct nilfs_palloc_req *req)
407 {
408  struct buffer_head *desc_bh, *bitmap_bh;
409  struct nilfs_palloc_group_desc *desc;
410  unsigned char *bitmap;
411  void *desc_kaddr, *bitmap_kaddr;
412  unsigned long group, maxgroup, ngroups;
413  unsigned long group_offset, maxgroup_offset;
414  unsigned long n, entries_per_group, groups_per_desc_block;
415  unsigned long i, j;
416  int pos, ret;
417 
418  ngroups = nilfs_palloc_groups_count(inode);
419  maxgroup = ngroups - 1;
420  group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
421  entries_per_group = nilfs_palloc_entries_per_group(inode);
422  groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);
423 
424  for (i = 0; i < ngroups; i += n) {
425  if (group >= ngroups) {
426  /* wrap around */
427  group = 0;
428  maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
429  &maxgroup_offset) - 1;
430  }
431  ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
432  if (ret < 0)
433  return ret;
434  desc_kaddr = kmap(desc_bh->b_page);
435  desc = nilfs_palloc_block_get_group_desc(
436  inode, group, desc_bh, desc_kaddr);
437  n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
438  maxgroup);
439  for (j = 0; j < n; j++, desc++, group++) {
440  if (nilfs_palloc_group_desc_nfrees(inode, group, desc)
441  > 0) {
442  ret = nilfs_palloc_get_bitmap_block(
443  inode, group, 1, &bitmap_bh);
444  if (ret < 0)
445  goto out_desc;
446  bitmap_kaddr = kmap(bitmap_bh->b_page);
447  bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
448  pos = nilfs_palloc_find_available_slot(
449  inode, group, group_offset, bitmap,
450  entries_per_group);
451  if (pos >= 0) {
452  /* found a free entry */
453  nilfs_palloc_group_desc_add_entries(
454  inode, group, desc, -1);
455  req->pr_entry_nr =
456  entries_per_group * group + pos;
457  kunmap(desc_bh->b_page);
458  kunmap(bitmap_bh->b_page);
459 
460  req->pr_desc_bh = desc_bh;
461  req->pr_bitmap_bh = bitmap_bh;
462  return 0;
463  }
464  kunmap(bitmap_bh->b_page);
465  brelse(bitmap_bh);
466  }
467 
468  group_offset = 0;
469  }
470 
471  kunmap(desc_bh->b_page);
472  brelse(desc_bh);
473  }
474 
475  /* no entries left */
476  return -ENOSPC;
477 
478  out_desc:
479  kunmap(desc_bh->b_page);
480  brelse(desc_bh);
481  return ret;
482 }
483 
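484 /**
485  * nilfs_palloc_commit_alloc_entry - finish allocation of a persistent object
486  * @inode: inode of metadata file using this allocator
487  * @req: nilfs_palloc_req structure exchanged for the allocation
488  */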
489 void nilfs_palloc_commit_alloc_entry(struct inode *inode,
490  struct nilfs_palloc_req *req)
491 {
492  mark_buffer_dirty(req->pr_bitmap_bh);
493  mark_buffer_dirty(req->pr_desc_bh);
494  nilfs_mdt_mark_dirty(inode);
495 
496  brelse(req->pr_bitmap_bh);
497  brelse(req->pr_desc_bh);
498 }
499 
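500 /**
501  * nilfs_palloc_commit_free_entry - finish deallocating a persistent object
502  * @inode: inode of metadata file using this allocator
503  * @req: nilfs_palloc_req structure exchanged for the removal
504  */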
505 void nilfs_palloc_commit_free_entry(struct inode *inode,
506  struct nilfs_palloc_req *req)
507 {
508  struct nilfs_palloc_group_desc *desc;
509  unsigned long group, group_offset;
510  unsigned char *bitmap;
511  void *desc_kaddr, *bitmap_kaddr;
512 
513  group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
514  desc_kaddr = kmap(req->pr_desc_bh->b_page);
515  desc = nilfs_palloc_block_get_group_desc(inode, group,
516  req->pr_desc_bh, desc_kaddr);
517  bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
518  bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
519 
520  if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
521  group_offset, bitmap))
522  printk(KERN_WARNING "%s: entry number %llu already freed\n",
523  __func__, (unsigned long long)req->pr_entry_nr);
524  else
525  nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
526 
527  kunmap(req->pr_bitmap_bh->b_page);
528  kunmap(req->pr_desc_bh->b_page);
529 
530  mark_buffer_dirty(req->pr_desc_bh);
531  mark_buffer_dirty(req->pr_bitmap_bh);
532  nilfs_mdt_mark_dirty(inode);
533 
534  brelse(req->pr_bitmap_bh);
535  brelse(req->pr_desc_bh);
536 }
537 
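538 /**
539  * nilfs_palloc_abort_alloc_entry - cancel allocation of a persistent object
540  * @inode: inode of metadata file using this allocator
541  * @req: nilfs_palloc_req structure exchanged for the allocation
542  */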
543 void nilfs_palloc_abort_alloc_entry(struct inode *inode,
544  struct nilfs_palloc_req *req)
545 {
546  struct nilfs_palloc_group_desc *desc;
547  void *desc_kaddr, *bitmap_kaddr;
548  unsigned char *bitmap;
549  unsigned long group, group_offset;
550 
551  group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
552  desc_kaddr = kmap(req->pr_desc_bh->b_page);
553  desc = nilfs_palloc_block_get_group_desc(inode, group,
554  req->pr_desc_bh, desc_kaddr);
555  bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
556  bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
557  if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
558  group_offset, bitmap))
559  printk(KERN_WARNING "%s: entry number %llu already freed\n",
560  __func__, (unsigned long long)req->pr_entry_nr);
561  else
562  nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
563 
564  kunmap(req->pr_bitmap_bh->b_page);
565  kunmap(req->pr_desc_bh->b_page);
566 
567  brelse(req->pr_bitmap_bh);
568  brelse(req->pr_desc_bh);
569 
570  req->pr_entry_nr = 0;
571  req->pr_bitmap_bh = NULL;
572  req->pr_desc_bh = NULL;
573 }
574 
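575 /**
576  * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
577  * @inode: inode of metadata file using this allocator
578  * @req: nilfs_palloc_req structure exchanged for the removal
579  */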
580 int nilfs_palloc_prepare_free_entry(struct inode *inode,
581  struct nilfs_palloc_req *req)
582 {
583  struct buffer_head *desc_bh, *bitmap_bh;
584  unsigned long group, group_offset;
585  int ret;
586 
587  group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
588  ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
589  if (ret < 0)
590  return ret;
591  ret = nilfs_palloc_get_bitmap_block(inode, group, 1, &bitmap_bh);
592  if (ret < 0) {
593  brelse(desc_bh);
594  return ret;
595  }
596 
597  req->pr_desc_bh = desc_bh;
598  req->pr_bitmap_bh = bitmap_bh;
599  return 0;
600 }
601 
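602 /**
603  * nilfs_palloc_abort_free_entry - cancel deallocating a persistent object
604  * @inode: inode of metadata file using this allocator
605  * @req: nilfs_palloc_req structure exchanged for the removal
606  */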
607 void nilfs_palloc_abort_free_entry(struct inode *inode,
608  struct nilfs_palloc_req *req)
609 {
610  brelse(req->pr_bitmap_bh);
611  brelse(req->pr_desc_bh);
612 
613  req->pr_entry_nr = 0;
614  req->pr_bitmap_bh = NULL;
615  req->pr_desc_bh = NULL;
616 }
617 
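618 /**
619  * nilfs_palloc_group_is_in - judge if an entry is in a group
620  * @inode: inode of metadata file using this allocator
621  * @group: group number
622  * @nr: serial number of the entry (e.g. inode number)
623  */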
624 static int
625 nilfs_palloc_group_is_in(struct inode *inode, unsigned long group, __u64 nr)
626 {
627  __u64 first, last;
628 
629  first = group * nilfs_palloc_entries_per_group(inode);
630  last = first + nilfs_palloc_entries_per_group(inode) - 1;
631  return (nr >= first) && (nr <= last);
632 }
633 
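634 /**
635  * nilfs_palloc_freev - deallocate a set of persistent objects
636  * @inode: inode of metadata file using this allocator
637  * @entry_nrs: array of entry numbers to be deallocated
638  * @nitems: number of entries stored in the array
639  */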
640 int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
641 {
642  struct buffer_head *desc_bh, *bitmap_bh;
643  struct nilfs_palloc_group_desc *desc;
644  unsigned char *bitmap;
645  void *desc_kaddr, *bitmap_kaddr;
646  unsigned long group, group_offset;
647  int i, j, n, ret;
648 
649  for (i = 0; i < nitems; i = j) {
650  group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
651  ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
652  if (ret < 0)
653  return ret;
654  ret = nilfs_palloc_get_bitmap_block(inode, group, 0,
655  &bitmap_bh);
656  if (ret < 0) {
657  brelse(desc_bh);
658  return ret;
659  }
660  desc_kaddr = kmap(desc_bh->b_page);
661  desc = nilfs_palloc_block_get_group_desc(
662  inode, group, desc_bh, desc_kaddr);
663  bitmap_kaddr = kmap(bitmap_bh->b_page);
664  bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
665  for (j = i, n = 0;
666  (j < nitems) && nilfs_palloc_group_is_in(inode, group,
667  entry_nrs[j]);
668  j++) {
669  nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
670  if (!nilfs_clear_bit_atomic(
671  nilfs_mdt_bgl_lock(inode, group),
672  group_offset, bitmap)) {
673  printk(KERN_WARNING
674  "%s: entry number %llu already freed\n",
675  __func__,
676  (unsigned long long)entry_nrs[j]);
677  } else {
678  n++;
679  }
680  }
681  nilfs_palloc_group_desc_add_entries(inode, group, desc, n);
682 
683  kunmap(bitmap_bh->b_page);
684  kunmap(desc_bh->b_page);
685 
686  mark_buffer_dirty(desc_bh);
687  mark_buffer_dirty(bitmap_bh);
688  nilfs_mdt_mark_dirty(inode);
689 
690  brelse(bitmap_bh);
691  brelse(desc_bh);
692  }
693  return 0;
694 }
695 
696 void nilfs_palloc_setup_cache(struct inode *inode,
697  struct nilfs_palloc_cache *cache)
698 {
699  NILFS_MDT(inode)->mi_palloc_cache = cache;
700  spin_lock_init(&cache->lock);
701 }
702 
703 void nilfs_palloc_clear_cache(struct inode *inode)
704 {
705  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
706 
707  spin_lock(&cache->lock);
708  brelse(cache->prev_desc.bh);
709  brelse(cache->prev_bitmap.bh);
710  brelse(cache->prev_entry.bh);
711  cache->prev_desc.bh = NULL;
712  cache->prev_bitmap.bh = NULL;
713  cache->prev_entry.bh = NULL;
714  spin_unlock(&cache->lock);
715 }
716 
717 void nilfs_palloc_destroy_cache(struct inode *inode)
718 {
719  nilfs_palloc_clear_cache(inode);
720  NILFS_MDT(inode)->mi_palloc_cache = NULL;
721 }