Linux Kernel 3.7.1
sufile.c
/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <[email protected]>.
 * Revised by Ryusuke Konishi <[email protected]>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

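/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of the metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */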
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

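/* Cast the mdt inode's private data to the sufile-specific info structure. */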
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

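/* Return the block offset within the sufile holding @segnum's usage entry. */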
static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

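/* Return the index of @segnum's usage entry within its sufile block. */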
static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

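/*
 * Number of usage entries that can be scanned starting at @curr without
 * crossing a block boundary or going past @max.
 */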
static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

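/* Locate @segnum's usage entry inside a mapped sufile block. */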
static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

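/*
 * Adjust the clean/dirty segment counters in the sufile header block and
 * mark the header buffer dirty.  Negative deltas are passed as two's
 * complement u64 values, e.g. (u64)-1 to decrement.
 */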
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

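/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */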
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

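/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store the number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 *            @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */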
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

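/**
 * nilfs_sufile_update - modify a single segment usage
 * @sufile: inode of segment usage file
 * @segnum: segment number to be modified
 * @create: creation flag
 * @dofunc: primitive operation for the update
 *
 * Single-segment counterpart of nilfs_sufile_updatev(); @dofunc is
 * called with the header block and the block containing @segnum's
 * usage entry, both taken under the mdt semaphore.  For example,
 * nilfs_sufile_update(sufile, segnum, 0, nilfs_sufile_do_free) frees
 * one segment.
 */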
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

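/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file (sufile)
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, the following
 * negative error code is returned.
 *
 * %-ERANGE - invalid segment region
 */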
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

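/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error,
 * one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */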
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, ncleansegs, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;
			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

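/*
 * Callback for nilfs_sufile_update(): cancel a tentative free by turning
 * a clean segment back into the dirty state.
 */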
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

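/*
 * Callback for nilfs_sufile_update(): make a segment garbage by resetting
 * its usage to a dirty entry with no live blocks.
 */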
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

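/*
 * Callback for nilfs_sufile_updatev()/nilfs_sufile_update(): free a
 * segment by marking its usage entry clean.
 */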
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);
}

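/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */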
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

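/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (option)
 */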
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

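/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information
 * is stored in the place pointed by @sustat. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */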
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

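/*
 * Callback for nilfs_sufile_update(): flag a segment as having an error.
 * A clean segment flagged this way is also removed from the clean count.
 */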
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

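/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */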
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
out:
	return ret;
}

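/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */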
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

out_header:
	brelse(header_bh);
out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

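/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Return Value: On success, the number of entries stored in @buf is
 * returned.  On error, one of the following negative error codes is
 * returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */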
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

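/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */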
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}