#include <linux/export.h>
#include <linux/xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/magic.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
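/* By default a tmpfs mount is capped at half of RAM */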
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}
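/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE), consistent with the pre-accounting of private
 * mappings.  tmpfs mounts instead use VM_NORESERVE and account each
 * block as it is allocated, so sparse files stay cheap; shmem_getpage
 * reports an acct_block failure as ENOSPC rather than ENOMEM.
 */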
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
static void shmem_free_inode(struct super_block *sb)
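/*
 * shmem_recalc_inode - recalculate the block usage of an inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */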
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
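/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */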
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	void *item;

	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}
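/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not raced over: look the slot up again and compare.
 */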
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}
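/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */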
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
		error = shmem_radix_tree_replace(mapping, index, expected,
								page);
		__inc_zone_page_state(page, NR_SHMEM);
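/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */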
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	__dec_zone_page_state(page, NR_SHMEM);
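/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */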
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
	unsigned int nr_found;

	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	for (i = 0; i < nr_found; i++) {
		page = radix_tree_deref_slot((void **)pages[i]);
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
		if (!page_cache_get_speculative(page))
		if (unlikely(page != *((void **)pages[i]))) {
			indices[ret] = indices[i];
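/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */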
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
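/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */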
static void shmem_deswap_pagevec(struct pagevec *pvec)
	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
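/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */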
void shmem_unlock_mapping(struct address_space *mapping)
	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
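/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate,
 * in which case pages that were already Uptodate beforehand are left alone.
 */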
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
							bool unfalloc)
	long nr_swaps_freed = 0;

	pagevec_init(&pvec, 0);
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
							pvec.pages, indices);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
			if (!trylock_page(page))
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
			zero_user_segment(page, partial_start, top);
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
			zero_user_segment(page, 0, partial_end);
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
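/* shmem_setattr(): on truncation, shrink i_size first, then free the tail */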
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
	struct inode *inode = dentry->d_inode;

		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
		if (newsize < oldsize) {
#ifdef CONFIG_TMPFS_POSIX_ACL
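/* Called at inode eviction: release pages and swap, xattrs, and the reserved inode */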
static void shmem_evict_inode(struct inode *inode)
	if (inode->i_mapping->a_ops == &shmem_aops) {
	simple_xattrs_free(&info->xattrs);
	shmem_free_inode(inode->i_sb);
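/*
 * shmem_unuse_inode() / shmem_unuse(): called from swapoff to bring a page
 * back from swap into the shmem page cache it came from.
 */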
	radswap = swp_to_radix_entry(swap);

	/* Move the list head so the next swapoff search starts here */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		error = shmem_replace_page(pagep, gfp, info, index);

	error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	spin_lock(&info->lock);
	spin_unlock(&info->lock);
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		found = shmem_unuse_inode(info, swap, &page);
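/*
 * Move the page from the page cache to the swap cache.
 */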
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
	BUG_ON(!PageLocked(page));
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 */
	if (!PageUptodate(page)) {
		struct shmem_falloc *shmem_falloc;
		spin_lock(&inode->i_lock);
		if (shmem_falloc &&
		    index >= shmem_falloc->start &&
		    index < shmem_falloc->next)
			shmem_falloc->nr_unswapped++;
		spin_unlock(&inode->i_lock);
		clear_highpage(page);
		SetPageUptodate(page);
	shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
	BUG_ON(page_mapped(page));
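/*
 * shmem_swapin() and shmem_alloc_page() apply the inode's NUMA mempolicy by
 * building a transient pseudo-vma on the stack; vm_pgoff is biased by i_ino
 * so that interleave varies between inodes.
 */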
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	mpol_cond_put(pvma.vm_policy);
static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	mpol_cond_put(pvma.vm_policy);
static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
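/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * which case it may sit in a zone lower than the mapping's gfp allows:
 * in that case, copy it to a freshly allocated suitable page.
 */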
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
	struct page *oldpage, *newpage;

	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	newpage = shmem_alloc_page(gfp, info, index);

	copy_highpage(newpage, oldpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								newpage);
	spin_unlock_irq(&swap_mapping->tree_lock);
	lru_cache_add_anon(newpage);
	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);
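/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */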
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
	if (page || (sgp == SGP_READ && !swap.val)) {
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
			*fault_type |= VM_FAULT_MAJOR;
		page = shmem_swapin(swap, gfp, info, index);

		/* We have to do this with page locked to prevent races */
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
		if (!PageUptodate(page)) {
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);

		error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		if (shmem_acct_block(info->flags)) {
		page = shmem_alloc_page(gfp, info, index);
		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		radix_tree_preload_end();
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		if (sgp == SGP_FALLOC)
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	spin_lock(&info->lock);
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
	sbinfo = SHMEM_SB(inode->i_sb);
	shmem_unacct_blocks(info->flags, 1);
		 !shmem_confirm_swap(mapping, index, swap))
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
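/*
 * Page fault entry point: SGP_CACHE brings the page uptodate and returns it
 * locked, hence the VM_FAULT_LOCKED preset in ret.
 */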
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
int shmem_lock(struct file *file, int lock, struct user_struct *user)
	struct inode *inode = file->f_path.dentry->d_inode;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	spin_unlock(&info->lock);
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
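/*
 * shmem_get_inode(): allocate and initialize a new inode, charging it
 * against the mount's inode limit via shmem_reserve_inode().
 */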
static struct inode *shmem_get_inode(struct super_block *sb,
		const struct inode *dir, umode_t mode, dev_t dev,
		unsigned long flags)
	struct inode *inode;

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		info->flags = flags & VM_NORESERVE;
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		/* special files (default case): */
			inode->i_op = &shmem_special_inode_operations;
		/* S_IFREG: */
			inode->i_op = &shmem_inode_operations;
			mpol_shared_policy_init(&info->policy,
						shmem_get_sbmpol(sbinfo));
		/* S_IFDIR: some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
	} else
		shmem_free_inode(sb);
#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
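/*
 * do_shmem_file_read(): loop over the file, using shmem_getpage(SGP_READ) so
 * that holes read back as zeros, and feed each page to the read actor.
 */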
static void do_shmem_file_read(struct file *filp, loff_t *ppos,
			       read_descriptor_t *desc, read_actor_t actor)
	struct inode *inode = filp->f_path.dentry->d_inode;
	enum sgp_type sgp = SGP_READ;

		struct page *page = NULL;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		if (index > end_index)
		if (index == end_index) {
		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);

		i_size = i_size_read(inode);
		if (index == end_index) {
		if (mapping_writably_mapped(mapping))

		ret = actor(desc, page, offset, nr);
		if (ret != nr || !desc->count)

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
	struct file *filp = iocb->ki_filp;
	loff_t *ppos = &iocb->ki_pos;

	for (seg = 0; seg < nr_segs; seg++) {
		if (desc.count == 0)
		retval = retval ?: desc.error;
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;

	isize = i_size_read(inode);
	left = isize - *ppos;

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		page = spd.pages[page_nr];
		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
		if (end_index == index) {

		this_len = min(this_len, plen - loff);
		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;

	while (page_nr < nr_pages)
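/*
 * shmem_fallocate(): preallocate pages (SGP_FALLOC) or punch a hole.
 * FALLOC_FL_PUNCH_HOLE unmaps and truncates the range; preallocation is
 * undone via shmem_undo_range(..., true) if it fails part way, and bails
 * out early when reclaim swaps pages out faster than we allocate them
 * (nr_unswapped > nr_falloced).
 */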
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							loff_t len)
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_falloc shmem_falloc;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);

	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced  = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
		error = shmem_getpage(inode, index, &page, SGP_FALLOC, NULL);
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);

		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		i_size_write(inode, offset + len);
	spin_lock(&inode->i_lock);
	spin_unlock(&inode->i_lock);
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
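/*
 * File creation. Allocate an inode, and we're done..
 */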
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
	struct inode *inode;

		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
#ifdef CONFIG_TMPFS_POSIX_ACL
		dir->i_size += BOGO_DIRENT_SIZE;
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
			bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
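/*
 * Link a file..
 */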
static int shmem_link(struct dentry *old_dentry, struct inode *dir,
		struct dentry *dentry)
	struct inode *inode = old_dentry->d_inode;

	ret = shmem_reserve_inode(inode->i_sb);
	dir->i_size += BOGO_DIRENT_SIZE;
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
	struct inode *inode = dentry->d_inode;

		shmem_free_inode(inode->i_sb);
	dir->i_size -= BOGO_DIRENT_SIZE;
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
	return shmem_unlink(dir, dentry);
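/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */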
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry,
		struct inode *new_dir, struct dentry *new_dentry)
	struct inode *inode = old_dentry->d_inode;

		(void) shmem_unlink(new_dir, new_dentry);
	} else if (they_are_dirs) {

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
static int shmem_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
	struct inode *inode;

	len = strlen(symname) + 1;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);

	info = SHMEM_I(inode);
	if (len <= SHORT_SYMLINK_LEN) {
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		inode->i_op = &shmem_symlink_inode_operations;
		memcpy(kaddr, symname, len);
		SetPageUptodate(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
static void *shmem_follow_short_symlink(struct dentry *dentry,
					struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd,
			   void *cookie)
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
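/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */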
#ifdef CONFIG_TMPFS_XATTR
/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		if (!new_xattr->name) {
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
static int shmem_xattr_validate(const char *name)
	struct { const char *prefix; size_t len; } arr[] = {
		size_t preflen = arr[i].len;
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
	err = shmem_xattr_validate(name);
static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
	err = shmem_xattr_validate(name);
static int shmem_removexattr(struct dentry *dentry, const char *name)
	err = shmem_xattr_validate(name);
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
static const struct inode_operations shmem_short_symlink_operations = {
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
static const struct inode_operations shmem_symlink_inode_operations = {
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
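/*
 * tmpfs NFS export support: encode the inode number and generation into a
 * file handle, and look inodes back up via ilookup5() with shmem_match().
 */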
static struct dentry *shmem_get_parent(struct dentry *child)
static int shmem_match(struct inode *ino, void *vfh)
	inum = (inum << 32) | fh[1];
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
	struct inode *inode;
	struct dentry *dentry = NULL;

	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
	if (inode_unhashed(inode)) {
	if (inode_unhashed(inode))
	fh[1] = inode->i_ino;
static const struct export_operations shmem_export_ops = {
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
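/*
 * Parse tmpfs mount options: size=/nr_blocks=/nr_inodes=/mode=/uid=/gid=
 * and mpol=, as the strcmp chain below shows; "size" also accepts a
 * percentage of physical RAM.
 */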
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
	while (options != NULL) {
		options = strchr(options, ',');
		if (options == NULL)

		if ((value = strchr(this_char, '=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);

		if (!strcmp(this_char, "size")) {
			unsigned long long size;
			/* "size=N%" scales the limit with physical RAM */
				size *= totalram_pages;
		} else if (!strcmp(this_char, "nr_blocks")) {
		} else if (!strcmp(this_char, "nr_inodes")) {
		} else if (!strcmp(this_char, "mode")) {
		} else if (!strcmp(this_char, "uid")) {
			if (!uid_valid(sbinfo->uid))
		} else if (!strcmp(this_char, "gid")) {
			if (!gid_valid(sbinfo->gid))
		} else if (!strcmp(this_char, "mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
	unsigned long inodes;

	if (shmem_parse_options(data, &config, true))
	mpol_put(sbinfo->mpol);
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
	shmem_show_mpol(seq, sbinfo->mpol);
static void shmem_put_super(struct super_block *sb)
int shmem_fill_super(struct super_block *sb, void *data, int silent)
	struct inode *inode;

		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {

	sb->s_op = &shmem_ops;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL

	shmem_put_super(sb);
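/* Inode slab cache: shmem inodes embed the VFS inode, see SHMEM_I() */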
static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)

static void shmem_destroy_callback(struct rcu_head *head)

static void shmem_destroy_inode(struct inode *inode)

static void shmem_init_inode(void *foo)

static int shmem_init_inodecache(void)

static void shmem_destroy_inodecache(void)
static const struct address_space_operations shmem_aops = {
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
static const struct file_operations shmem_file_operations = {
	.aio_read	= shmem_file_aio_read,
	.splice_read	= shmem_file_splice_read,
	.fallocate	= shmem_fallocate,
static const struct inode_operations shmem_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
static const struct super_operations shmem_ops = {
	.destroy_inode	= shmem_destroy_inode,
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
	.evict_inode	= shmem_evict_inode,
	.put_super	= shmem_put_super,
static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)

static struct file_system_type shmem_fs_type = {
	.mount		= shmem_mount,
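/*
 * shmem_init(): register the backing device and inode cache, register the
 * tmpfs filesystem type, and mount the internal instance (shm_mnt) used
 * for SysV shared memory and shared anonymous mappings.
 */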
	error = bdi_init(&shmem_backing_dev_info);

	error = shmem_init_inodecache();

	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);

	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
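/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code.  Without
 * CONFIG_SHMEM the full swap-backed implementation above is compiled out,
 * and tmpfs behaves as a plain ramfs via the aliases below.
 */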
#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)
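/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the object size
 */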
struct file *shmem_file_setup(const char *name, loff_t size,
			      unsigned long flags)
	struct inode *inode;
	struct dentry *root;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			  &shmem_file_operations);

	return ERR_PTR(error);
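/*
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */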
int shmem_zero_setup(struct vm_area_struct *vma)
	if (IS_ERR(file))
		return PTR_ERR(file);
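/*
 * shmem_read_mapping_page_gfp - read into page cache, using specified page
 * allocation flags; like read_cache_page_gfp() for a tmpfs mapping, going
 * through shmem_getpage_gfp() so a swapped-out page is brought back in.
 */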
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
	struct inode *inode = mapping->host;

	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);