#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/hash.h>
#include <linux/bitops.h>

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
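/*
 * init_buffer(): attach a completion callback and private data to a
 * buffer_head before it is submitted for I/O.
 */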
void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
static int sleep_on_buffer(void *word)

	wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
							TASK_UNINTERRUPTIBLE);

static void __clear_page_buffers(struct page *page)
	ClearPagePrivate(page);
	set_page_private(page, 0);

static int quiet_error(struct buffer_head *bh)
{
	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
		return 0;
	return 1;
}
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

	__end_buffer_read_notouch(bh, uptodate);

	set_buffer_uptodate(bh);
	if (!quiet_error(bh)) {
	set_buffer_write_io_error(bh);
	clear_buffer_uptodate(bh);
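/*
 * The fragments below are from __find_get_block_slow(): look up the page
 * cache page that would hold the given block of the block device and walk
 * its buffer ring for a buffer_head whose b_blocknr matches.
 */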
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
	struct buffer_head *ret = NULL;
	struct buffer_head *bh;
	struct buffer_head *head;

	if (!page_has_buffers(page))
	head = page_buffers(page);
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
		}
		bh = bh->b_this_page;
	} while (bh != head);

	printk("__find_get_block_slow() failed. "
		"block=%llu, b_blocknr=%llu\n",
		(unsigned long long)block,
		(unsigned long long)bh->b_blocknr);
	printk("b_state=0x%08lx, b_size=%zu\n",
		bh->b_state, bh->b_size);
static void free_more_memory(void)
	(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),

static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
	struct buffer_head *first;
	struct buffer_head *tmp;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));
	set_buffer_uptodate(bh);
	clear_buffer_uptodate(bh);
	if (!quiet_error(bh))
	first = page_buffers(page);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	if (!buffer_uptodate(tmp))
	if (buffer_async_read(tmp)) {
		BUG_ON(!buffer_locked(tmp));
	tmp = tmp->b_this_page;
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
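/*
 * The fragments below appear to belong to end_buffer_async_write(), the
 * write-side counterpart of end_buffer_async_read(): it clears the
 * async_write flag under BH_Uptodate_Lock and ends page writeback once
 * every buffer on the page has completed.
 */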
	struct buffer_head *first;
	struct buffer_head *tmp;

	BUG_ON(!buffer_async_write(bh));
	set_buffer_uptodate(bh);
	if (!quiet_error(bh)) {
	set_buffer_write_io_error(bh);
	clear_buffer_uptodate(bh);
	first = page_buffers(page);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_write(bh);
	tmp = bh->b_this_page;
	if (buffer_async_write(tmp)) {
		BUG_ON(!buffer_locked(tmp));
	tmp = tmp->b_this_page;
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}
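/*
 * Illustrative only (not part of this file): the writeback path typically
 * marks each dirty buffer and later submits it, roughly:
 *
 *	if (test_clear_buffer_dirty(bh))
 *		mark_buffer_async_write_endio(bh, handler);
 *	...
 *	submit_bh(WRITE, bh);
 */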
static void __remove_assoc_queue(struct buffer_head *bh)
	list_del_init(&bh->b_assoc_buffers);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
	return !list_empty(&inode->i_data.private_list);

	struct buffer_head *bh;
	if (buffer_locked(bh)) {
	if (!buffer_uptodate(bh))

	return fsync_buffers_list(&buffer_mapping->private_lock,

		sector_t bblock, unsigned blocksize)
	if (buffer_dirty(bh))

	if (!bh->b_assoc_map) {
		list_move_tail(&bh->b_assoc_buffers,
static void __set_page_dirty(struct page *page,

	return !TestSetPageDirty(page);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		set_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);
	newly_dirty = !TestSetPageDirty(page);

	__set_page_dirty(page, mapping, 1);
	struct buffer_head *bh;

	INIT_LIST_HEAD(&tmp);
	while (!list_empty(list)) {
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			if (buffer_dirty(bh)) {
	while (!list_empty(&tmp)) {
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
		if (!buffer_uptodate(bh))
	err2 = osync_buffers_list(lock, list);

	while (!list_empty(list))

	while (!list_empty(list)) {
		if (buffer_dirty(bh)) {
		__remove_assoc_queue(bh);
	struct buffer_head *bh, *head;

	while ((offset -= size) >= 0) {
		bh->b_this_page = head;
	head = head->b_this_page;

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
	struct buffer_head *bh, *tail;

	bh = bh->b_this_page;
	tail->b_this_page = head;
	attach_page_buffers(page, head);

	loff_t sz = i_size_read(bdev->bd_inode);
	unsigned int sizebits = blksize_bits(size);
	retval = (sz >> sizebits);
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);

	if (!buffer_mapped(bh)) {
		bh->b_blocknr = block;
		set_buffer_uptodate(bh);
		if (block < end_block)
			set_buffer_mapped(bh);
	bh = bh->b_this_page;
	} while (bh != head);
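/*
 * grow_dev_page(): find or create the page-cache page that holds the
 * buffers for a given block-device block, attach buffer_heads of the
 * requested size and map them to disk via init_page_buffers().
 */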
		pgoff_t index, int size, int sizebits)
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));
	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						index << sizebits, size);

	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, index << sizebits, size);
	spin_unlock(&inode->i_mapping->private_lock);
	} while ((size << sizebits) < PAGE_SIZE);

	index = block >> sizebits;

	if (unlikely(index != block >> sizebits)) {
			__func__, (unsigned long long)block,

	return grow_dev_page(bdev, block, index, size, sizebits);
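/*
 * __getblk_slow(): fallback for __getblk() when the buffer is not already
 * present; it keeps calling grow_buffers() (falling back to
 * free_more_memory() under memory pressure) until __find_get_block()
 * succeeds.
 */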
static struct buffer_head *
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			bdev_logical_block_size(bdev));

	struct buffer_head *bh;
	ret = grow_buffers(bdev, block, size);

	if (buffer_dirty(bh)) {
		if (buffer_dirty(bh))
	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		if (!TestSetPageDirty(page)) {
			__set_page_dirty(page, mapping, 0);

	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;

static struct buffer_head *__bread_slow(struct buffer_head *bh)
	if (buffer_uptodate(bh)) {
	if (buffer_uptodate(bh))

#define BH_LRU_SIZE	8

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif
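/*
 * bh_lru_lock()/bh_lru_unlock() protect a small per-CPU LRU of recently
 * used buffer_heads (BH_LRU_SIZE entries).  On SMP interrupts are disabled
 * because the LRU can also be invalidated from invalidate_bh_lrus().
 */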
static inline void check_irqs_on(void)
#ifdef irqs_disabled
static void bh_lru_install(struct buffer_head *bh)
	struct buffer_head *evictee = NULL;

	struct buffer_head *bh2 =
		__this_cpu_read(bh_lrus.bhs[in]);
	if (out >= BH_LRU_SIZE) {
	while (out < BH_LRU_SIZE)

static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
	struct buffer_head *ret = NULL;

	if (bh && bh->b_bdev == bdev &&
	    bh->b_blocknr == block && bh->b_size == size) {

struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
	bh = __find_get_block_slow(bdev, block);

struct buffer_head *
__getblk(struct block_device *bdev, sector_t block, unsigned size)
	bh = __getblk_slow(bdev, block, size);

	struct buffer_head *bh = __getblk(bdev, block, size);
struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
	struct buffer_head *bh = __getblk(bdev, block, size);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
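/*
 * Illustrative only (not part of this file): a filesystem reads a metadata
 * block with something like the following and drops it with brelse():
 *
 *	struct buffer_head *bh = __bread(bdev, block, blocksize);
 *	if (bh) {
 *		... examine bh->b_data ...
 *		brelse(bh);
 *	}
 */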
static void invalidate_bh_lru(void *arg)

static bool has_bh_in_lru(int cpu, void *dummy)

		struct page *page, unsigned long offset)
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
	bh->b_data = (char *)(0 + offset);

static void discard_buffer(struct buffer_head *bh)
	clear_buffer_dirty(bh);
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	struct buffer_head *head, *bh, *next;
	unsigned int curr_off = 0;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
	head = page_buffers(page);
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;
		if (offset <= curr_off)
		curr_off = next_off;
	} while (bh != head);
void create_empty_buffers(struct page *page,
			unsigned long blocksize, unsigned long b_state)
	struct buffer_head *bh, *head, *tail;

	bh->b_state |= b_state;
	bh = bh->b_this_page;
	tail->b_this_page = head;

	spin_lock(&page->mapping->private_lock);
	if (PageUptodate(page) || PageDirty(page)) {
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (PageUptodate(page))
			set_buffer_uptodate(bh);
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
	struct buffer_head *old_bh;

	old_bh = __find_get_block_slow(bdev, block);
	clear_buffer_dirty(old_bh);
	wait_on_buffer(old_bh);
	clear_buffer_req(old_bh);

static inline int block_size_bits(unsigned int blocksize)
{
	return ilog2(blocksize);
}
static struct buffer_head *create_page_buffers(struct page *page,
				struct inode *inode, unsigned int b_state)
	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
	return page_buffers(page);
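/*
 * __block_write_full_page(): the core buffer-based writepage path.  It maps
 * dirty, unmapped buffers with get_block(), marks them for asynchronous
 * write-out, sets page writeback and submits the buffers; the second half
 * below is the error path (mapping_set_error) for -ENOSPC/-EIO style
 * failures.
 */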
static int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler)
	struct buffer_head *bh, *head;
	unsigned int blocksize, bbits;
	int nr_underway = 0;

	head = create_page_buffers(page, inode,
				(1 << BH_Dirty)|(1 << BH_Uptodate));

	blocksize = bh->b_size;
	bbits = block_size_bits(blocksize);
	last_block = (i_size_read(inode) - 1) >> bbits;

	if (block > last_block) {
		clear_buffer_dirty(bh);
		set_buffer_uptodate(bh);
	} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, block, bh, 1);
		clear_buffer_delay(bh);
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
	bh = bh->b_this_page;
	} while (bh != head);

	if (!buffer_mapped(bh))
	} else if (!trylock_buffer(bh)) {
	if (test_clear_buffer_dirty(bh)) {
		mark_buffer_async_write_endio(bh, handler);
	} while ((bh = bh->b_this_page) != head);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
	} while (bh != head);

	if (nr_underway == 0) {

	if (buffer_mapped(bh) && buffer_dirty(bh) &&
	    !buffer_delay(bh)) {
		mark_buffer_async_write_endio(bh, handler);
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);
	BUG_ON(PageWriteback(page));
	mapping_set_error(page->mapping, err);
	set_page_writeback(page);
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
	} while (bh != head);
	unsigned int block_start, block_end;
	struct buffer_head *head, *bh;

	BUG_ON(!PageLocked(page));
	if (!page_has_buffers(page))
	bh = head = page_buffers(page);
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					start = max(from, block_start);
					zero_user(page, start, size);
				set_buffer_uptodate(bh);
			clear_buffer_new(bh);
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	unsigned blocksize, bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

	BUG_ON(!PageLocked(page));
	head = create_page_buffers(page, inode, 0);
	blocksize = head->b_size;
	bbits = block_size_bits(blocksize);

	for(bh = head, block_start = 0; bh != head || !block_start;
	    block++, block_start=block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
		clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (buffer_new(bh)) {
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
				if (block_end > to || block_start < from)
					zero_user_segments(page,
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
	while(wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
static int __block_commit_write(struct inode *inode, struct page *page,
		unsigned from, unsigned to)
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	blocksize = bh->b_size;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (!buffer_uptodate(bh))
			set_buffer_uptodate(bh);
			clear_buffer_new(bh);
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);

	SetPageUptodate(page);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block)
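/*
 * Illustrative only (myfs_* names are hypothetical): a filesystem usually
 * wraps block_write_begin() from its ->write_begin method, e.g.
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags, pagep,
 *					 myfs_get_block);
 *	}
 */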
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
	struct inode *inode = mapping->host;

	if (!PageUptodate(page))
	__block_commit_write(inode, page, start, start+copied);

		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
	struct inode *inode = mapping->host;
	int i_size_changed = 0;

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
	mark_inode_dirty(inode);
	unsigned block_start, block_end, blocksize;
	struct buffer_head *bh, *head;

	if (!page_has_buffers(page))
	head = page_buffers(page);
	blocksize = head->b_size;
		block_end = block_start + blocksize;
		if (block_end > from && block_start < to) {
			if (!buffer_uptodate(bh)) {
			if (block_end >= to)
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
	unsigned int blocksize, bbits;
	int fully_mapped = 1;

	head = create_page_buffers(page, inode, 0);
	blocksize = head->b_size;
	bbits = block_size_bits(blocksize);

	lblock = (i_size_read(inode)+blocksize-1) >> bbits;
		if (buffer_uptodate(bh))
		if (!buffer_mapped(bh)) {
			if (iblock < lblock) {
				WARN_ON(bh->b_size != blocksize);
				err = get_block(inode, iblock, bh, 0);
			if (!buffer_mapped(bh)) {
				zero_user(page, i * blocksize, blocksize);
				set_buffer_uptodate(bh);
			if (buffer_uptodate(bh))
	} while (i++, iblock++, (bh = bh->b_this_page) != head);

	SetPageMappedToDisk(page);
	if (!PageError(page))
		SetPageUptodate(page);

	for (i = 0; i < nr; i++) {
		mark_buffer_async_read(bh);
	for (i = 0; i < nr; i++) {
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
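/*
 * Illustrative only (myfs_* names are hypothetical): block_read_full_page()
 * is normally exposed through the address_space operations, e.g.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, myfs_get_block);
 *	}
 */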
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;

	while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
		zero_user(page, zerofrom, len);
		balance_dirty_pages_ratelimited(mapping);

	if (index == curidx) {
		if (offset <= zerofrom) {
		if (zerofrom & (blocksize-1)) {
			*bytes |= (blocksize-1);
		len = offset - zerofrom;
		zero_user(page, zerofrom, len);
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata,
	struct inode *inode = mapping->host;
	unsigned blocksize = 1 << inode->i_blkbits;

	err = cont_expand_zero(file, mapping, pos, bytes);
	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
		*bytes |= (blocksize-1);

	struct inode *inode = page->mapping->host;
	__block_commit_write(inode,page,from,to);
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;

	size = i_size_read(inode);
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
	wait_on_page_writeback(page);

	sb_start_pagefault(sb);
	sb_end_pagefault(sb);
	return block_page_mkwrite_return(ret);
static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
}

static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
	struct buffer_head *bh;

	BUG_ON(!PageLocked(page));
	spin_lock(&page->mapping->private_lock);
		if (PageDirty(page))
			set_buffer_dirty(bh);
		if (!bh->b_this_page)
			bh->b_this_page = head;
		bh = bh->b_this_page;
	} while (bh != head);
	attach_page_buffers(page, head);
	spin_unlock(&page->mapping->private_lock);
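/*
 * The nobh_* ("no buffer_head") variants below try to avoid leaving
 * buffer_heads attached to the page: nobh_write_begin() maps the needed
 * blocks with temporary buffer_heads and reads what it must, and the
 * buffers are only attached to the page (attach_nobh_buffers()) on paths
 * that still need them, such as a short copy.
 */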
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata,
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *head, *bh;
	unsigned block_in_page;
	unsigned block_start, block_end;
	int is_mapped_to_disk = 1;

	if (page_has_buffers(page)) {
	if (PageMappedToDisk(page))

	block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);

	for (block_start = 0, block_in_page = 0, bh = head;
	     block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= to)
		ret = get_block(inode, block_in_file + block_in_page,
		if (!buffer_mapped(bh))
			is_mapped_to_disk = 0;
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
		if (buffer_new(bh) || !buffer_mapped(bh)) {
			zero_user_segments(page, block_start, from,
		if (buffer_uptodate(bh))
		if (block_start < from || block_end > to) {
			bh->b_end_io = end_buffer_read_nobh;

	for (bh = head; bh; bh = bh->b_this_page) {
		if (!buffer_uptodate(bh))
	if (is_mapped_to_disk)
		SetPageMappedToDisk(page);

	attach_nobh_buffers(page, head);
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
	struct inode *inode = page->mapping->host;
	struct buffer_head *head = fsdata;
	struct buffer_head *bh;
	BUG_ON(fsdata != NULL && page_has_buffers(page));

	if (unlikely(copied < len) && head)
		attach_nobh_buffers(page, head);
	if (page_has_buffers(page))
		return generic_write_end(file, mapping, pos, len,
					copied, page, fsdata);

	SetPageUptodate(page);
	if (pos+copied > inode->i_size) {
		i_size_write(inode, pos+copied);
		mark_inode_dirty(inode);

	head = head->b_this_page;
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	if (page->index < end_index)
	if (page->index >= end_index+1 || !offset) {
	if (page->mapping->a_ops->invalidatepage)
		page->mapping->a_ops->invalidatepage(page, offset);

	ret = __block_write_full_page(inode, page, get_block, wbc,
	struct inode *inode = mapping->host;
	struct buffer_head map_bh;

	length = offset & (blocksize - 1);
	length = blocksize - length;

	page = grab_cache_page(mapping, index);
	if (page_has_buffers(page)) {
	while (offset >= pos) {
	map_bh.b_size = blocksize;
	err = get_block(inode, iblock, &map_bh, 0);
	if (!buffer_mapped(&map_bh))
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
	if (!PageUptodate(page)) {
	if (page_has_buffers(page))
	zero_user(page, offset, length);

	struct inode *inode = mapping->host;
	struct buffer_head *bh;

	length = offset & (blocksize - 1);
	length = blocksize - length;

	page = grab_cache_page(mapping, index);
	if (!page_has_buffers(page))
	bh = page_buffers(page);
	while (offset >= pos) {
		bh = bh->b_this_page;
	if (!buffer_mapped(bh)) {
		WARN_ON(bh->b_size != blocksize);
		err = get_block(inode, iblock, bh, 0);
	if (!buffer_mapped(bh))
	if (PageUptodate(page))
		set_buffer_uptodate(bh);
	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
	if (!buffer_uptodate(bh))
	zero_user(page, offset, length);
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);

	if (page->index < end_index)
		return __block_write_full_page(inode, page, get_block, wbc,
					       handler);
	if (page->index >= end_index+1 || !offset) {

	return __block_write_full_page(inode, page, get_block, wbc, handler);

	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	get_block(inode, block, &tmp, 0);
	return tmp.b_blocknr;
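/*
 * end_bio_bh_io_sync(): the bio completion handler used by submit_bh(); it
 * propagates BIO_UPTODATE/BIO_EOPNOTSUPP state back to the buffer_head and
 * then invokes bh->b_end_io().
 */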
static void end_bio_bh_io_sync(struct bio *bio, int err)
	struct buffer_head *bh = bio->bi_private;

	set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
	set_bit(BH_Quiet, &bh->b_state);
	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));

static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;

	if (unlikely(bio->bi_sector >= maxsector))
	maxsector -= bio->bi_sector;
	bytes = bio->bi_size;
	if (likely((bytes >> 9) <= maxsector))
	bytes = maxsector << 9;

	bio->bi_size = bytes;
	bio->bi_io_vec[0].bv_len = bytes;
	memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
	BUG_ON(!buffer_locked(bh));
	BUG_ON(!buffer_mapped(bh));
	BUG_ON(buffer_delay(bh));
	BUG_ON(buffer_unwritten(bh));

	if (test_set_buffer_req(bh) && (rw & WRITE))
		clear_buffer_write_io_error(bh);

	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_io_vec[0].bv_page = bh->b_page;
	bio->bi_io_vec[0].bv_len = bh->b_size;
	bio->bi_io_vec[0].bv_offset = bh_offset(bh);

	bio->bi_size = bh->b_size;
	bio->bi_end_io = end_bio_bh_io_sync;
	bio->bi_private = bh;

	guard_bh_eod(rw, bio, bh);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
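/*
 * Illustrative only: the classic synchronous read pattern built on
 * submit_bh() (essentially what __bread_slow() does):
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		... handle the I/O error ...
 */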
	for (i = 0; i < nr; i++) {
		struct buffer_head *bh = bhs[i];

		if (!trylock_buffer(bh))
		if (test_clear_buffer_dirty(bh)) {
		if (!buffer_uptodate(bh)) {

	if (!test_clear_buffer_dirty(bh)) {

	if (test_clear_buffer_dirty(bh)) {
	if (!ret && !buffer_uptodate(bh))

static inline int buffer_busy(struct buffer_head *bh)
{
	return atomic_read(&bh->b_count) |
		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
}
static int
drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh;

	if (buffer_write_io_error(bh) && page->mapping)
	bh = bh->b_this_page;
	} while (bh != head);

		struct buffer_head *next = bh->b_this_page;
		if (bh->b_assoc_map)
			__remove_assoc_queue(bh);
	} while (bh != head);
	*buffers_to_free = head;
	__clear_page_buffers(page);
	struct buffer_head *buffers_to_free = NULL;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))

	if (mapping == NULL) {
		ret = drop_buffers(page, &buffers_to_free);
	ret = drop_buffers(page, &buffers_to_free);

	if (buffers_to_free) {
		struct buffer_head *bh = buffers_to_free;
			struct buffer_head *next = bh->b_this_page;
		} while (bh != buffers_to_free);

	static int msg_count;

	if (msg_count < 5) {
		"warning: process `%s' used the obsolete bdflush"
		" system call\n", current->comm);
static int max_buffer_heads;

static void recalc_bh_state(void)
	buffer_heads_over_limit = (tot > max_buffer_heads);

	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
	INIT_LIST_HEAD(&ret->b_assoc_buffers);

	BUG_ON(!list_empty(&bh->b_assoc_buffers));

static void buffer_exit_cpu(int cpu)

		unsigned long action, void *hcpu)
	buffer_exit_cpu((unsigned long)hcpu);
	if (!buffer_uptodate(bh)) {
		if (!buffer_uptodate(bh))

	BUG_ON(!buffer_locked(bh));
	if (buffer_uptodate(bh)) {
	if (buffer_uptodate(bh))

			sizeof(struct buffer_head), 0,
	max_buffer_heads = nrpages * (PAGE_SIZE /
					sizeof(struct buffer_head));