#include <linux/module.h>
#include <linux/slab.h>

#ifdef CONFIG_JFS_STATISTICS
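/*
 * (The mpStat counters live under the CONFIG_JFS_STATISTICS guard
 * above.)
 *
 * Metapage locking: the META_locked bit in mp->flag serializes access
 * to an individual metapage.  trylock_metapage() takes the bit with
 * acquire semantics, lock_metapage()/__lock_metapage() (bodies not
 * shown here) sleep until the bit can be taken, and unlock_metapage()
 * releases it and wakes any waiter.
 */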
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)

static inline void __lock_metapage(struct metapage *mp)

static inline void lock_metapage(struct metapage *mp)
#define METAPOOL_MIN_PAGES 32

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
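/*
 * When the page size is larger than the 4K metapage (PSIZE), several
 * metapages share one page and page_private() points at a struct
 * meta_anchor holding the per-slot mp[] array, a live-metapage count
 * and an in-flight I/O count.  The helpers below (shown only in part)
 * look up, attach and detach metapages through that anchor; the anchor
 * itself is freed when its last metapage goes away.
 */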
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}
static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
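/*
 * inc_io()/dec_io() count bios in flight against a page; when the
 * count drops to zero, dec_io() invokes the completion handler passed
 * to it (last_read_complete() or last_write_complete() below).
 */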
static inline void inc_io(struct page *page)

static inline void dec_io(struct page *page, void (*handler) (struct page *))
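/*
 * With a 4K page size a single metapage covers the whole page, so
 * page_private() points straight at the metapage and the per-page I/O
 * accounting degenerates: inc_io() is a no-op and dec_io() calls the
 * completion handler immediately.
 */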
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)
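/*
 * Metapage structures come from a dedicated slab cache backed by a
 * mempool of at least METAPOOL_MIN_PAGES objects, so metadata I/O can
 * make progress even under memory pressure.  init_once() is the slab
 * constructor; the failure checks below are from metapage_init(),
 * which creates the cache and mempool at module initialization.
 */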
static void init_once(void *foo)

static inline void free_metapage(struct metapage *mp)

	if (metapage_cache == NULL)
		return -ENOMEM;

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	remove_metapage(page, mp);
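/*
 * metapage_get_blocks() maps a logical block of a metadata inode to
 * its on-disk block number.  Requests past i_size return 0 (unmapped),
 * the length is clamped to the file size, and the mapping itself comes
 * from an xtLookup() of the inode's extent tree.
 */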
	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
	if ((rc == 0) && *len)
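/*
 * Read completion: metapage_read_end_io() runs once per bio and only
 * flags errors; dec_io() arranges for last_read_complete() to run when
 * the final bio for the page finishes, marking the page up to date and
 * unlocking it.
 */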
static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}
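/*
 * Write completion: remove_from_logsync() detaches a metapage from the
 * journal's sync list once its contents are safely on disk.  When the
 * last write bio for a page completes, dec_io() calls
 * last_write_complete(), which clears the per-metapage I/O state and
 * ends page writeback; metapage_write_end_io() itself only records
 * errors and drops its bio.
 */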
static void remove_from_logsync(struct metapage *mp)

static void last_write_complete(struct page *page)
{
		mp = page_to_mp(page, offset);
			remove_from_logsync(mp);

static void metapage_write_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_write_complete);
	bio_put(bio);
}
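/*
 * The locals and fragments below are from metapage_writepage(), the
 * writepage address_space operation.  It scans the dirty metapages in
 * the page, coalesces runs that are contiguous both in memory and on
 * disk, and submits each run as a WRITE bio, counting outstanding bios
 * with inc_io().  If bio_add_page() cannot take a run or submission
 * fails, the error path dumps the offending bio and completes the I/O
 * with the page marked in error.
 */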
	struct bio *bio = NULL;
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

		mp = page_to_mp(page, offset);

		block_offset = offset >> inode->i_blkbits;
		if (xlen && lblock == next_block) {
			len = min(xlen, blocks_per_mp);

		pblock = metapage_get_blocks(inode, lblock, &xlen);
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;
		next_block = lblock + len;

		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;

	if (nr_underway == 0)
		end_page_writeback(page);

	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
	dec_io(page, last_write_complete);

	dec_io(page, last_write_complete);
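/*
 * metapage_readpage() is the readpage address_space operation.  For
 * each mappable block range in the page it allocates a READ bio aimed
 * at the location returned by metapage_get_blocks() and submits it;
 * unmapped ranges are skipped.  The inc_io()/dec_io() pairing keeps
 * the page locked until the last bio has completed.
 */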
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;

	BUG_ON(!PageLocked(page));

	while (block_offset < blocks_per_page) {
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
			if (!PagePrivate(page))
				insert_metapage(page, NULL);

			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			offset = block_offset << inode->i_blkbits;

			block_offset += xlen;

	dec_io(page, last_read_complete);
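/*
 * metapage_releasepage() refuses to free a page while any metapage in
 * it is still referenced or dirty; otherwise it detaches the metapages
 * and drops them from the log sync list.  metapage_invalidatepage()
 * simply forces such a release.  Together with readpage/writepage
 * above, these form the address_space operations used for JFS
 * metadata.
 */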
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
		mp = page_to_mp(page, offset);

		jfs_info("metapage_releasepage: mp = 0x%p", mp);

			remove_from_logsync(mp);
		remove_metapage(page, mp);

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
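/*
 * __get_metapage() finds or creates the metapage covering a logical
 * block of an inode.  "absolute" means the block number is a raw disk
 * address resolved through the direct inode's mapping.  A brand-new
 * page can be grabbed with grab_cache_page() when one metapage fills
 * the page; otherwise the page is read in with read_mapping_page().
 * An existing metapage is sanity-checked and reused, a discarded one
 * is thrown away, and a freshly allocated one is inserted into the
 * page.
 */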
	unsigned long page_index;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);

		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
			jfs_err("grab_cache_page failed!");
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");

	mp = page_to_mp(page, page_offset);
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			jfs_error(inode->i_sb, "__get_metapage: using a "
				  "discarded metapage");
			discard_metapage(mp);

		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}

	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
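/*
 * Reference management: grab_metapage() takes an extra reference on a
 * metapage and locks it, force_metapage() pushes a metapage's page out
 * to disk immediately, and release_metapage() drops a reference and,
 * once the metapage is idle, writes it back if dirty, detaches it from
 * the log sync list and lets drop_metapage() dispose of it.
 */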
	jfs_info("grab_metapage: mp = 0x%p", mp);

	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);

	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

		remove_from_logsync(mp);

	drop_metapage(page, mp);
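/*
 * The loop below is from __invalidate_metapages(), which walks every
 * page backing the block range [addr, addr + len) of a metadata inode
 * and discards the metapages inside that range, pulling them off the
 * log sync list so the journal no longer references them.
 */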
	int BlocksPerPage = 1 << l2BlocksPerPage;
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;

	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {

			mp = page_to_mp(page, offset);

			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

				remove_from_logsync(mp);
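/*
 * With CONFIG_JFS_STATISTICS the metapage allocation counters are
 * exported read-only through a seq_file entry in /proc (see
 * jfs_mpstat_proc_show() and jfs_mpstat_proc_open() below).
 */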
#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)

	.open		= jfs_mpstat_proc_open,