13 #include <linux/sysctl.h>
16 #include <asm/tlbflush.h>
17 #include <asm/pgalloc.h>
/*
 * page_table_shareable() — decide whether a sibling VMA ("svma") that maps
 * the same file offset can share a PUD-level page table with the caller's
 * VMA.  Returns the shareable base address, presumably 0 when not shareable
 * — TODO confirm against the elided return statements.
 * NOTE(review): this is an excerpt; most body lines are missing, so only
 * the visible logic is documented.
 */
19 static unsigned long page_table_shareable(
struct vm_area_struct *svma,
/* The candidate sharing window is the PUD_SIZE-aligned span around saddr. */
25 unsigned long sbase = saddr &
PUD_MASK;
26 unsigned long s_end = sbase +
PUD_SIZE;
/*
 * Compare vm_flags with VM_LOCKED masked out: a difference in mlock state
 * alone does not prevent sharing the page table.
 */
30 unsigned long svm_flags = svma->
vm_flags & ~VM_LOCKED;
/* Any flag mismatch (among other elided conditions) disqualifies sharing. */
37 vm_flags != svm_flags ||
/*
 * vma_shareable() — predicate: can the page table covering @addr in @vma
 * take part in PUD-level sharing at all?  Body elided in this excerpt;
 * presumably checks alignment/size of the VMA against PUD_SIZE — TODO
 * confirm against the full source.
 */
44 static int vma_shareable(
struct vm_area_struct *vma,
unsigned long addr)
/*
 * huge_pmd_share() — try to reuse another process's PUD-level page table
 * for the huge mapping at @addr.  Walks every VMA mapping the same file
 * interval and asks page_table_shareable() for each candidate.
 * NOTE(review): excerpt only; return type, locking, and most of the body
 * are elided here.
 */
68 huge_pmd_share(
struct mm_struct *mm,
unsigned long addr,
pud_t *pud)
/* Bail out early if this VMA cannot participate in sharing. */
79 if (!vma_shareable(vma, addr))
/* Scan all VMAs that map the same file page (index idx..idx). */
83 vma_interval_tree_foreach(svma, &mapping->
i_mmap, idx, idx) {
87 saddr = page_table_shareable(svma, vma, addr, idx);
/*
 * NOTE(review): function name is elided in this excerpt — from the
 * (addr, sz) parameters and the huge_pmd_share() call this is presumably
 * huge_pte_alloc(); verify against the full source.
 */
140 unsigned long addr,
unsigned long sz)
/* PUD-sized (gigantic) pages take a different path than PMD-sized ones. */
149 if (sz == PUD_SIZE) {
/* For PMD-sized pages, first try to share an existing page table. */
154 pte = huge_pmd_share(mm, addr, pud);
/*
 * NOTE(review): isolated fragment — enclosing function is elided
 * (presumably follow_huge_addr(); confirm against the full source).
 * Visible check: fail unless @addr falls inside a hugetlb VMA.
 */
193 if (!vma || !is_vm_hugetlb_page(vma))
/*
 * NOTE(review): function name elided — the (pud, write) parameters and
 * the PUD_MASK offset arithmetic suggest follow_huge_pud(); confirm
 * against the full source.
 */
257 pud_t *pud,
int write)
/*
 * Advance from the head page of the gigantic page to the subpage that
 * contains @address: offset within the PUD span, in PAGE_SIZE units.
 */
263 page += ((address & ~PUD_MASK) >>
PAGE_SHIFT);
271 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/*
 * hugetlb_get_unmapped_area_bottomup() — find a free, suitably aligned
 * virtual range for a huge mapping, searching upward from low addresses.
 * NOTE(review): excerpt only; the search loop is mostly elided.
 */
272 static unsigned long hugetlb_get_unmapped_area_bottomup(
struct file *
file,
273 unsigned long addr,
unsigned long len,
274 unsigned long pgoff,
unsigned long flags)
/*
 * Success test: either no VMA lies above the candidate, or the candidate
 * range [addr, addr+len) ends at or before the next VMA's start.
 */
305 if (!vma || addr + len <= vma->
vm_start) {
/*
 * hugetlb_get_unmapped_area_topdown() — find a free range for a huge
 * mapping, searching downward from high addresses; falls back to the
 * bottom-up search if the top-down scan fails.
 * NOTE(review): excerpt only; loop setup and several branches are elided.
 */
315 static unsigned long hugetlb_get_unmapped_area_topdown(
struct file *file,
316 unsigned long addr0,
unsigned long len,
317 unsigned long pgoff,
unsigned long flags)
/* Keep the caller's hint; addr is the working cursor. */
323 unsigned long addr = addr0;
/* Fast path: a previously recorded hole is already big enough. */
331 if (len <= largest_hole) {
/* Candidate fits below the next VMA — a usable gap was found. */
353 if (addr + len <= vma->vm_start) {
/* Remember the largest gap seen so far for future fast-path reuse. */
364 if (addr + largest_hole < vma->vm_start)
369 }
while (len <= vma->vm_start);
/* Retry from the original base before giving up on the top-down scan. */
376 if (start_addr != base) {
/* Top-down search failed: fall back to the bottom-up allocator. */
389 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
/*
 * NOTE(review): the opening of this function is elided — presumably
 * hugetlb_get_unmapped_area(), the dispatcher that honors an explicit
 * address hint and otherwise picks bottom-up vs. top-down search
 * (by mmap layout policy — TODO confirm the elided condition).
 */
403 unsigned long len,
unsigned long pgoff,
unsigned long flags)
/* Honor the caller's hint if the range there is free. */
424 (!vma || addr + len <= vma->vm_start))
428 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
431 return hugetlb_get_unmapped_area_topdown(file, addr, len,
/*
 * setup_hugepagesz() — parse the "hugepagesz=" boot parameter and register
 * the requested huge page size.  PUD_SIZE (1 GB) pages are accepted only
 * when the CPU advertises gbpages support.
 * NOTE(review): excerpt only; the PMD_SIZE branch and error handling are
 * elided.
 */
438 static __init int setup_hugepagesz(
char *
opt)
443 }
else if (ps == PUD_SIZE && cpu_has_gbpages) {
/* Register the parser for the "hugepagesz=" kernel command-line option. */
452 __setup(
"hugepagesz=", setup_hugepagesz);