24 #include <linux/kernel.h>
26 #include <linux/string.h>
28 #include <linux/errno.h>
52 static inline unsigned long
53 nilfs_sufile_segment_usages_per_block(
const struct inode *sufile)
55 return NILFS_MDT(sufile)->mi_entries_per_block;
59 nilfs_sufile_get_blkoff(
const struct inode *sufile,
__u64 segnum)
61 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
62 do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
63 return (
unsigned long)
t;
67 nilfs_sufile_get_offset(
const struct inode *sufile,
__u64 segnum)
69 __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
70 return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
74 nilfs_sufile_segment_usages_in_block(
const struct inode *sufile,
__u64 curr,
77 return min_t(
unsigned long,
78 nilfs_sufile_segment_usages_per_block(sufile) -
79 nilfs_sufile_get_offset(sufile, curr),
84 nilfs_sufile_block_get_segment_usage(
const struct inode *sufile,
__u64 segnum,
85 struct buffer_head *bh,
void *kaddr)
87 return kaddr + bh_offset(bh) +
88 nilfs_sufile_get_offset(sufile, segnum) *
89 NILFS_MDT(sufile)->mi_entry_size;
/*
 * nilfs_sufile_get_header_block - read the sufile header block; *bhp
 * receives its buffer head.  Returns 0 or a negative error code.
 * NOTE(review): the body is missing from this extract; upstream this is a
 * one-line wrapper around nilfs_mdt_get_block() for block 0 -- confirm.
 */
static inline int nilfs_sufile_get_header_block(
struct inode *sufile,
struct buffer_head **bhp)
/*
 * nilfs_sufile_get_segment_usage_block - get the sufile block holding
 * @segnum's usage entry into *bhp; @create selects whether a missing
 * block is created or the lookup fails.
 * NOTE(review): interior lines (the wrapped block-lookup call and return
 * type) are missing from this extract.
 */
nilfs_sufile_get_segment_usage_block(
struct inode *sufile,
__u64 segnum,
int create,
struct buffer_head **bhp)
/* block offset of the usage entry for @segnum */
nilfs_sufile_get_blkoff(sufile, segnum),
/*
 * nilfs_sufile_delete_segment_usage_block - delete the sufile block that
 * holds @segnum's usage entry (used when a truncated range covers the
 * whole block).
 * NOTE(review): body truncated in this extract; only the block-offset
 * argument of the wrapped delete call survives.
 */
static int nilfs_sufile_delete_segment_usage_block(
struct inode *sufile,
nilfs_sufile_get_blkoff(sufile, segnum));
/*
 * nilfs_sufile_mod_counter - adjust the clean/dirty segment counters in
 * the sufile header block.  @ncleanadd/@ndirtyadd are signed deltas
 * carried in u64 (callers pass (u64)-1 to decrement, see the do_free /
 * do_scrap call sites below).
 * NOTE(review): the mapping, le64 counter updates and mark-dirty lines
 * are missing from this extract.
 */
static void nilfs_sufile_mod_counter(
struct buffer_head *header_bh,
u64 ncleanadd,
u64 ndirtyadd)
/* header structure starts at the buffer's offset in the mapped page */
header = kaddr + bh_offset(header_bh);
/* nilfs_sufile_get_ncleansegs: return the cached clean-segment count. */
return NILFS_SUI(sufile)->ncleansegs;
/*
 * nilfs_sufile_updatev - apply @dofunc to the usage entry of every
 * segment in the @segnumv array, batching consecutive segments whose
 * entries live in the same sufile block so each block is looked up once.
 * *ndone reports how many segments were processed.
 * NOTE(review): signature head, error paths and brelse calls are missing
 * from this extract -- do not trust line adjacency here.
 */
int create,
size_t *ndone,
struct buffer_head *,
struct buffer_head *))
struct buffer_head *header_bh, *bh;
unsigned long blkoff, prev_blkoff;
size_t nerr = 0,
n = 0;
/* reject out-of-range segment numbers before taking any blocks */
for (seg = segnumv; seg < segnumv + nsegs; seg++) {
if (
unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
"%s: invalid segment number: %llu\n", __func__,
(
unsigned long long)*seg);
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
dofunc(sufile, *seg, header_bh, bh);
/* stop when the array is exhausted */
if (++seg >= segnumv + nsegs)
/* reuse the current bh while segments map to the same block */
prev_blkoff = blkoff;
blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
if (blkoff == prev_blkoff)
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_update - single-segment variant of nilfs_sufile_updatev():
 * validate @segnum, fetch the header block and the usage block (creating
 * it if @create), then invoke @dofunc under mi_sem held for writing.
 * NOTE(review): signature head and error/cleanup lines are missing from
 * this extract.
 */
struct buffer_head *,
struct buffer_head *))
struct buffer_head *header_bh, *bh;
/* out-of-range segment numbers are rejected with a message */
if (
unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
__func__, (
unsigned long long)segnum);
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
dofunc(sufile, segnum, header_bh, bh);
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_set_alloc_range fragment: validates the requested
 * allocation range [start, end] against the total segment count under
 * mi_sem.  NOTE(review): the lines that store the range (presumably into
 * sui->allocmin/allocmax -- confirm) are missing from this extract.
 */
nsegs = nilfs_sufile_get_nsegments(sufile);
if (start <= end && end < nsegs) {
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_alloc fragment: allocate a free (clean) segment.  Scans
 * forward from the segment after the last allocation, wrapping within
 * the allowed range, and marks the first clean usage entry dirty.
 * NOTE(review): large parts (wrap logic, counter updates, unlock/cleanup
 * paths) are missing from this extract.
 */
struct buffer_head *header_bh, *su_bh;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
__u64 segnum, maxsegnum, last_alloc;
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
header = kaddr + bh_offset(header_bh);
nsegments = nilfs_sufile_get_nsegments(sufile);
/* start scanning just past the most recently allocated segment */
segnum = last_alloc + 1;
for (cnt = 0; cnt < nsegments; cnt += nsus) {
if (segnum > maxsegnum) {
maxsegnum = last_alloc;
}
else if (segnum > sui->
allocmin &&
maxsegnum = nsegments - 1;
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
/* how many entries of this block fall inside the scan range */
nsus = nilfs_sufile_segment_usages_in_block(
sufile, segnum, maxsegnum);
for (j = 0; j < nsus; j++, su = (
void *)su + susz, segnum++) {
if (!nilfs_segment_usage_clean(su))
/* found a clean segment: claim it by marking it dirty */
nilfs_segment_usage_set_dirty(su);
header = kaddr + bh_offset(header_bh);
nilfs_mdt_mark_dirty(sufile);
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_do_cancel_free fragment (dofunc for nilfs_sufile_update):
 * re-dirty a segment that was expected to be clean, warning if it is
 * not, and move one segment from the clean to the dirty counter.
 * NOTE(review): kmap/kunmap and buffer mark-dirty lines are missing.
 */
struct buffer_head *header_bh,
struct buffer_head *su_bh)
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
/* a non-clean entry here indicates an inconsistency */
if (
unlikely(!nilfs_segment_usage_clean(su))) {
__func__, (
unsigned long long)segnum);
nilfs_segment_usage_set_dirty(su);
/* one fewer clean segment, one more dirty */
nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--;
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_do_scrap fragment (dofunc): force a segment's usage entry
 * to the dirty state regardless of its current flags.  The counter
 * deltas depend on whether the entry was previously clean or dirty.
 * NOTE(review): mapping and flag-reset lines are missing from this
 * extract.
 */
struct buffer_head *header_bh,
struct buffer_head *su_bh)
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
/* record prior state to compute counter adjustments below */
clean = nilfs_segment_usage_clean(su);
dirty = nilfs_segment_usage_dirty(su);
nilfs_sufile_mod_counter(header_bh, clean ? (
u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -=
clean;
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_do_free fragment (dofunc): free a segment by setting its
 * usage entry clean.  Warns if the entry is already clean, error-flagged,
 * or not dirty; the clean counter gains one segment and the dirty
 * counter loses one iff the entry was dirty.
 * NOTE(review): kmap/kunmap and buffer mark-dirty lines are missing.
 */
struct buffer_head *header_bh,
struct buffer_head *su_bh)
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
/* freeing an already-clean segment indicates an inconsistency */
if (nilfs_segment_usage_clean(su)) {
__func__, (
unsigned long long)segnum);
WARN_ON(nilfs_segment_usage_error(su));
WARN_ON(!nilfs_segment_usage_dirty(su));
sudirty = nilfs_segment_usage_dirty(su);
nilfs_segment_usage_set_clean(su);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (
u64)-1 : 0);
NILFS_SUI(sufile)->ncleansegs++;
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_mark_dirty fragment: look up @segnum's usage block
 * (without creating it) and mark the sufile dirty.
 * NOTE(review): the lines between (presumably marking the buffer dirty
 * and releasing it -- confirm) are missing from this extract.
 */
struct buffer_head *bh;
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_set_segment_usage fragment: update @segnum's usage entry
 * with its block count (@nblocks) and last-modified time (@modtime),
 * warning if the entry carries the error flag.  Runs under mi_sem held
 * for writing.
 * NOTE(review): signature head and the le32/le64 store lines are missing
 * from this extract.
 */
unsigned long nblocks,
time_t modtime)
struct buffer_head *bh;
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
WARN_ON(nilfs_segment_usage_error(su));
nilfs_mdt_mark_dirty(sufile);
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_get_stat fragment: fill @sustat from the on-disk sufile
 * header (total segment count and, in the missing lines, clean/dirty
 * counts) under mi_sem held for reading.
 * NOTE(review): most field copies are missing from this extract.
 */
struct buffer_head *header_bh;
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
header = kaddr + bh_offset(header_bh);
sustat->
ss_nsegs = nilfs_sufile_get_nsegments(sufile);
up_read(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_do_set_error fragment (dofunc): flag @segnum's usage
 * entry as erroneous; a no-op if the flag is already set.  If the entry
 * was clean, one segment is removed from the clean counters.
 * NOTE(review): mapping and early-return lines are missing from this
 * extract.
 */
struct buffer_head *header_bh,
struct buffer_head *su_bh)
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
/* already marked: nothing to do (body of this branch is missing) */
if (nilfs_segment_usage_error(su)) {
suclean = nilfs_segment_usage_clean(su);
nilfs_segment_usage_set_error(su);
nilfs_sufile_mod_counter(header_bh, -1, 0);
NILFS_SUI(sufile)->ncleansegs--;
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_truncate_range fragment: release the usage entries of
 * segments [start, end].  Active segments abort the operation; entries
 * flagged as error are set clean and counted in @ncleaned; a sufile
 * block whose entries are all covered is deleted outright.  Finally the
 * clean counters are bumped by @ncleaned.
 * NOTE(review): error paths, brelse calls and several loop-interior
 * lines are missing from this extract.
 */
static int nilfs_sufile_truncate_range(
struct inode *sufile,
struct buffer_head *header_bh;
struct buffer_head *su_bh;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
unsigned long segusages_per_block;
unsigned long nsegs, ncleaned;
nsegs = nilfs_sufile_get_nsegments(sufile);
/* reject empty or out-of-range requests */
if (start > end || start >= nsegs)
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
/* walk the range one sufile block at a time */
for (segnum = start; segnum <=
end; segnum +=
n) {
n =
min_t(
unsigned long,
segusages_per_block -
nilfs_sufile_get_offset(sufile, segnum),
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
/* first pass: bail out if any segment in the block is active */
for (j = 0; j <
n; j++, su = (
void *)su + susz) {
nilfs_segment_is_active(nilfs, segnum + j)) {
/* second pass: clear error-flagged entries to clean */
for (su = su2, j = 0; j <
n; j++, su = (
void *)su + susz) {
if (nilfs_segment_usage_error(su)) {
nilfs_segment_usage_set_clean(su);
/* whole block covered: remove it from the sufile */
if (n == segusages_per_block) {
nilfs_sufile_delete_segment_usage_block(sufile, segnum);
NILFS_SUI(sufile)->ncleansegs += ncleaned;
nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
nilfs_mdt_mark_dirty(sufile);
/*
 * nilfs_sufile_resize fragment: change the filesystem's segment count to
 * @newnsegs.  Shrinking requires enough clean segments to cover the
 * removed range plus the reserved segments, and delegates to
 * nilfs_sufile_truncate_range(); the header's segment count is then
 * rewritten.
 * NOTE(review): signature head, the grow branch body and error paths are
 * missing from this extract.
 */
struct buffer_head *header_bh;
unsigned long nsegs, nrsvsegs;
nsegs = nilfs_sufile_get_nsegments(sufile);
/* no-op when the size is unchanged */
if (nsegs == newnsegs)
/* shrink needs spare clean segments beyond the reserved set */
if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->
ncleansegs)
ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (newnsegs > nsegs) {
ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
header = kaddr + bh_offset(header_bh);
nilfs_mdt_mark_dirty(sufile);
up_write(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_get_suinfo fragment: copy usage information for up to
 * @nsi segments starting at @segnum into the @si array, whose records
 * are @sisz bytes apart.  Reads one sufile block at a time under mi_sem
 * held for reading; segments currently active are marked as such.
 * NOTE(review): signature head, the missing-block branch and the field
 * copies inside the inner loop are absent from this extract.
 */
unsigned sisz,
size_t nsi)
struct buffer_head *su_bh;
size_t susz = NILFS_MDT(sufile)->mi_entry_size;
unsigned long nsegs, segusages_per_block;
segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
/* clamp the request to the number of remaining segments */
nsegs =
min_t(
unsigned long,
nilfs_sufile_get_nsegments(sufile) - segnum,
for (i = 0; i < nsegs; i +=
n, segnum +=
n) {
n =
min_t(
unsigned long,
segusages_per_block -
nilfs_sufile_get_offset(sufile, segnum),
ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
/* block absent: skip n output records (branch body missing here) */
si = (
void *)si + sisz * n;
su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr);
j++, su = (
void *)su + susz, si = (
void *)si + sisz) {
if (nilfs_segment_is_active(nilfs, segnum + j))
up_read(&NILFS_MDT(sufile)->mi_sem);
/*
 * nilfs_sufile_read fragment: initialization path that reads the sufile
 * header and caches values in the sufile-info structure (allocmax is set
 * to the last valid segment number).
 * NOTE(review): this definition continues past the end of the visible
 * chunk and many interior lines are missing -- treat as partial.
 */
struct inode *sufile;
struct buffer_head *header_bh;
err = nilfs_sufile_get_header_block(sufile, &header_bh);
sui = NILFS_SUI(sufile);
header = kaddr + bh_offset(header_bh);
/* allow allocation anywhere up to the last segment by default */
sui->
allocmax = nilfs_sufile_get_nsegments(sufile) - 1;