/*
 * page.c - buffer/page management specific to NILFS
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"
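
/*
 * Buffer state bits that are carried over to a new buffer when its
 * contents are copied (see nilfs_copy_buffer() and nilfs_copy_page()).
 */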
#define NILFS_BUFFER_INHERENT_BITS					\
	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
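
/*
 * __nilfs_get_page_block() returns the buffer head covering @block on the
 * locked @page, attaching empty buffers to the page first if it has none.
 */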
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}
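
/*
 * nilfs_grab_buffer() looks up or creates the page covering @blkoff in
 * @mapping and returns the buffer head for that block, or NULL on failure.
 * On success, the page is returned locked with an extra buffer reference.
 */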
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}
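
/**
 * nilfs_forget_buffer - discard the dirty state of a buffer
 * @bh: buffer head of the buffer to be discarded
 */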
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_buffer(bh);
	clear_buffer_nilfs_volatile(bh);
	clear_buffer_nilfs_checked(bh);
	clear_buffer_nilfs_redirected(bh);
	clear_buffer_dirty(bh);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	clear_buffer_uptodate(bh);
	clear_buffer_mapped(bh);
	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}
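
/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */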
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & (1UL << BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & (1UL << BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}
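
/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns non-zero value.
 */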
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}
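
/*
 * nilfs_page_bug() dumps the state of @page and its buffers to the kernel
 * log to help diagnose page cache inconsistencies.
 */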
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}
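
/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be treated by the caller.  The page must not be under i/o.
 * Both src and dst pages must be locked.
 */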
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= (1UL << BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
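
/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Returns 0 on success, or a negative error code on failure.
 */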
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec, 0);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
				PAGEVEC_SIZE))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}
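
/**
 * nilfs_copy_back_pages -- copy back pages to the original cache
 * @dmap: destination page cache
 * @smap: source (shadow) page cache
 *
 * No pages must be added to the cache during this process.  This must
 * be ensured by the caller.
 */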
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* overwrite the existing page in the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);
			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				put_page(page);	/* drop the cache reference */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
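
/*
 * nilfs_clear_dirty_pages() discards the dirty state of all dirty pages
 * found in @mapping, clearing the corresponding buffer state bits as well.
 */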
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			ClearPageUptodate(page);
			ClearPageMappedToDisk(page);
			bh = head = page_buffers(page);
			do {
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				clear_buffer_nilfs_volatile(bh);
				clear_buffer_nilfs_checked(bh);
				clear_buffer_nilfs_redirected(bh);
				clear_buffer_uptodate(bh);
				clear_buffer_mapped(bh);
				unlock_buffer(bh);
				bh = bh->b_this_page;
			} while (bh != head);

			__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
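
/*
 * nilfs_page_count_clean_buffers() counts the buffers of @page that overlap
 * the byte range [@from, @to) and are not marked dirty.
 */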
unsigned nilfs_page_count_clean_buffers(struct page *page,
					unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}
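
/*
 * nilfs_mapping_init() sets up an address_space that nilfs uses privately,
 * e.g. for metadata files; GFP_NOFS avoids recursion into the filesystem
 * on memory allocation.
 */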
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	/* standard private-mapping setup; exact fields vary across versions */
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}
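
/*
 * NILFS2 needs __nilfs_clear_page_dirty() for two purposes:
 *
 * 1) When pages are copied back from a shadow cache to their original
 *    cache, the dirty flags of the shadow pages must be cancelled.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose of
 *    buffers in dirty state, which requires cancelling the dirty state
 *    of their pages.
 */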
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			spin_unlock_irq(&mapping->tree_lock);
			return clear_page_dirty_for_io(page);
		}
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	return TestClearPageDirty(page);
}
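
/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */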
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}