9 #include <linux/module.h>
11 #include <asm/current.h>
12 #include <linux/sched.h>
16 #include <linux/kernel.h>
21 #include <linux/string.h>
22 #include <linux/capability.h>
23 #include <linux/ctype.h>
28 #include <linux/mman.h>
29 #include <linux/slab.h>
33 #include <linux/magic.h>
36 #include <asm/uaccess.h>
88 static void huge_pagevec_release(
struct pagevec *pvec)
92 for (i = 0; i < pagevec_count(pvec); ++
i)
113 vma->
vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
147 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
150 unsigned long len,
unsigned long pgoff,
unsigned long flags)
172 (!vma || addr + len <= vma->vm_start))
201 if (!vma || addr + len <= vma->vm_start) {
214 char __user *
buf,
unsigned long count,
218 unsigned long left, copied = 0;
232 if (chunksize > size)
234 kaddr =
kmap(&page[i]);
238 copied += (chunksize -
left);
247 return copied ? copied : -
EFAULT;
255 static ssize_t hugetlbfs_read(
struct file *filp,
char __user *buf,
256 size_t len, loff_t *ppos)
260 struct inode *inode = mapping->
host;
263 unsigned long end_index;
273 unsigned long nr,
ret;
278 isize = i_size_read(inode);
282 if (index >= end_index) {
283 if (index > end_index)
298 ret = len < nr ? len :
nr;
309 ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
326 if ((ret != nr) || (len == 0))
334 static int hugetlbfs_write_begin(
struct file *file,
336 loff_t
pos,
unsigned len,
unsigned flags,
337 struct page **pagep,
void **fsdata)
342 static int hugetlbfs_write_end(
struct file *file,
struct address_space *mapping,
343 loff_t
pos,
unsigned len,
unsigned copied,
344 struct page *page,
void *fsdata)
350 static void truncate_huge_page(
struct page *page)
353 ClearPageUptodate(page);
357 static void truncate_hugepages(
struct inode *inode, loff_t lstart)
366 pagevec_init(&pvec, 0);
376 for (i = 0; i < pagevec_count(&pvec); ++
i) {
377 struct page *page = pvec.
pages[
i];
380 if (page->
index > next)
383 truncate_huge_page(page);
387 huge_pagevec_release(&pvec);
393 static void hugetlbfs_evict_inode(
struct inode *inode)
395 truncate_hugepages(inode, 0);
404 vma_interval_tree_foreach(vma, root, pgoff,
ULONG_MAX) {
405 unsigned long v_offset;
423 static int hugetlb_vmtruncate(
struct inode *inode, loff_t offset)
432 i_size_write(inode, offset);
435 hugetlb_vmtruncate_list(&mapping->
i_mmap, pgoff);
437 truncate_hugepages(inode, offset);
443 struct inode *inode = dentry->
d_inode;
446 unsigned int ia_valid = attr->
ia_valid;
458 error = hugetlb_vmtruncate(inode, attr->
ia_size);
464 mark_inode_dirty(inode);
468 static struct inode *hugetlbfs_get_root(
struct super_block *
sb,
481 info = HUGETLBFS_I(inode);
483 inode->
i_op = &hugetlbfs_dir_inode_operations;
487 lockdep_annotate_inode_mutex_key(inode);
492 static struct inode *hugetlbfs_get_inode(
struct super_block *sb,
503 inode->
i_mapping->a_ops = &hugetlbfs_aops;
504 inode->
i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
506 INIT_LIST_HEAD(&inode->
i_mapping->private_list);
507 info = HUGETLBFS_I(inode);
521 inode->
i_op = &hugetlbfs_inode_operations;
525 inode->
i_op = &hugetlbfs_dir_inode_operations;
535 lockdep_annotate_inode_mutex_key(inode);
543 static int hugetlbfs_mknod(
struct inode *dir,
549 inode = hugetlbfs_get_inode(dir->
i_sb, dir, mode, dev);
559 static int hugetlbfs_mkdir(
struct inode *dir,
struct dentry *dentry,
umode_t mode)
561 int retval = hugetlbfs_mknod(dir, dentry, mode |
S_IFDIR, 0);
567 static int hugetlbfs_create(
struct inode *dir,
struct dentry *dentry,
umode_t mode,
bool excl)
569 return hugetlbfs_mknod(dir, dentry, mode |
S_IFREG, 0);
572 static int hugetlbfs_symlink(
struct inode *dir,
573 struct dentry *dentry,
const char *symname)
596 static int hugetlbfs_set_page_dirty(
struct page *page)
598 struct page *
head = compound_head(page);
604 static int hugetlbfs_migrate_page(
struct address_space *mapping,
605 struct page *newpage,
struct page *page,
618 static int hugetlbfs_statfs(
struct dentry *dentry,
struct kstatfs *buf)
620 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->
d_sb);
626 spin_lock(&sbinfo->stat_lock);
632 spin_lock(&sbinfo->spool->lock);
633 buf->
f_blocks = sbinfo->spool->max_hpages;
634 free_pages = sbinfo->spool->max_hpages
635 - sbinfo->spool->used_hpages;
637 spin_unlock(&sbinfo->spool->lock);
638 buf->
f_files = sbinfo->max_inodes;
639 buf->
f_ffree = sbinfo->free_inodes;
641 spin_unlock(&sbinfo->stat_lock);
647 static void hugetlbfs_put_super(
struct super_block *sb)
649 struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
661 static inline int hugetlbfs_dec_free_inodes(
struct hugetlbfs_sb_info *sbinfo)
663 if (sbinfo->free_inodes >= 0) {
664 spin_lock(&sbinfo->stat_lock);
665 if (
unlikely(!sbinfo->free_inodes)) {
666 spin_unlock(&sbinfo->stat_lock);
669 sbinfo->free_inodes--;
670 spin_unlock(&sbinfo->stat_lock);
676 static void hugetlbfs_inc_free_inodes(
struct hugetlbfs_sb_info *sbinfo)
678 if (sbinfo->free_inodes >= 0) {
679 spin_lock(&sbinfo->stat_lock);
680 sbinfo->free_inodes++;
681 spin_unlock(&sbinfo->stat_lock);
686 static struct kmem_cache *hugetlbfs_inode_cachep;
688 static struct inode *hugetlbfs_alloc_inode(
struct super_block *sb)
690 struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
693 if (
unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
697 hugetlbfs_inc_free_inodes(sbinfo);
709 static void hugetlbfs_destroy_inode(
struct inode *inode)
711 hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->
i_sb));
717 .write_begin = hugetlbfs_write_begin,
718 .write_end = hugetlbfs_write_end,
719 .set_page_dirty = hugetlbfs_set_page_dirty,
720 .migratepage = hugetlbfs_migrate_page,
724 static void init_once(
void *
foo)
732 .read = hugetlbfs_read,
733 .mmap = hugetlbfs_file_mmap,
740 .create = hugetlbfs_create,
744 .symlink = hugetlbfs_symlink,
745 .mkdir = hugetlbfs_mkdir,
747 .mknod = hugetlbfs_mknod,
749 .setattr = hugetlbfs_setattr,
753 .setattr = hugetlbfs_setattr,
757 .alloc_inode = hugetlbfs_alloc_inode,
758 .destroy_inode = hugetlbfs_destroy_inode,
759 .evict_inode = hugetlbfs_evict_inode,
760 .statfs = hugetlbfs_statfs,
761 .put_super = hugetlbfs_put_super,
771 unsigned long long size = 0;
772 enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
788 if (!uid_valid(pconfig->
uid))
796 if (!gid_valid(pconfig->
gid))
803 pconfig->
mode = option & 01777
U;
810 size =
memparse(args[0].from, &rest);
813 setsize = SIZE_PERCENT;
830 "hugetlbfs: Unsupported page size %lu MB\n",
846 if (setsize > NO_SIZE) {
848 if (setsize == SIZE_PERCENT) {
850 size *= h->max_huge_pages;
859 printk(
KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
869 struct hugetlbfs_sb_info *sbinfo;
878 config.
hstate = &default_hstate;
879 ret = hugetlbfs_parse_options(data, &config);
887 sbinfo->hstate = config.
hstate;
891 sbinfo->spool =
NULL;
901 sb->
s_op = &hugetlbfs_ops;
909 kfree(sbinfo->spool);
915 int flags,
const char *dev_name,
void *data)
917 return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
922 .mount = hugetlbfs_mount,
926 static struct vfsmount *hugetlbfs_vfsmount;
928 static int can_do_hugetlb_shm(
void)
944 struct qstr quick_string;
949 if (!hugetlbfs_vfsmount)
957 "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
962 return ERR_PTR(-
EPERM);
966 root = hugetlbfs_vfsmount->
mnt_root;
969 quick_string.hash = 0;
994 &hugetlbfs_file_operations);
1009 return ERR_PTR(error);
1012 static int __init init_hugetlbfs_fs(
void)
1017 error =
bdi_init(&hugetlbfs_backing_dev_info);
1025 if (hugetlbfs_inode_cachep ==
NULL)
1034 if (!IS_ERR(vfsmount)) {
1035 hugetlbfs_vfsmount = vfsmount;
1039 error = PTR_ERR(vfsmount);
1048 static void __exit exit_hugetlbfs_fs(
void)