13 #include <linux/slab.h>
15 #include <linux/export.h>
20 #include <asm/pgtable.h>
21 #include <asm/pgalloc.h>
23 #include <asm/setup.h>
25 #define PAGE_SHIFT_64K 16
26 #define PAGE_SHIFT_16M 24
27 #define PAGE_SHIFT_16G 34
38 #ifdef CONFIG_PPC_FSL_BOOK3E
39 #define MAX_NUMBER_GPAGES 128
42 unsigned int nr_gpages;
44 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
46 #define MAX_NUMBER_GPAGES 1024
48 static unsigned nr_gpages;
51 static inline int shift_to_mmu_psize(
unsigned int shift)
55 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
61 static inline unsigned int mmu_psize_to_shift(
unsigned int mmu_psize)
/* True when this huge page directory entry is empty (no hugepte table attached). */
68 #define hugepd_none(hpd) ((hpd).pd == 0)
104 *shift = hugepd_shift(*hpdp);
105 return hugepte_offset(hpdp, ea, pdshift);
115 unsigned long address,
unsigned pdshift,
unsigned pshift)
120 #ifdef CONFIG_PPC_FSL_BOOK3E
122 int num_hugepd = 1 << (pshift - pdshift);
123 cachep = hugepte_cache;
137 #ifdef CONFIG_PPC_FSL_BOOK3E
144 for (i = 0; i < num_hugepd; i++, hpdp++) {
151 if (i < num_hugepd) {
152 for (i = i - 1 ; i >= 0; i--, hpdp--)
170 #ifdef CONFIG_PPC_FSL_BOOK3E
171 #define HUGEPD_PGD_SHIFT PGDIR_SHIFT
172 #define HUGEPD_PUD_SHIFT PUD_SHIFT
174 #define HUGEPD_PGD_SHIFT PUD_SHIFT
175 #define HUGEPD_PUD_SHIFT PMD_SHIFT
184 unsigned pshift =
__ffs(sz);
210 if (
hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
213 return hugepte_offset(hpdp, addr, pdshift);
216 #ifdef CONFIG_PPC_FSL_BOOK3E
222 unsigned int idx = shift_to_mmu_psize(
__ffs(page_size));
228 gpage_freearray[
idx].nr_gpages = number_of_pages;
230 for (i = 0; i < number_of_pages; i++) {
231 gpage_freearray[
idx].gpage_list[
i] =
addr;
242 struct huge_bootmem_page *
m;
243 int idx = shift_to_mmu_psize(hstate->order +
PAGE_SHIFT);
244 int nr_gpages = gpage_freearray[
idx].nr_gpages;
249 #ifdef CONFIG_HIGHMEM
255 m->phys = gpage_freearray[
idx].gpage_list[--nr_gpages];
257 m =
phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
260 list_add(&m->list, &huge_boot_pages);
261 gpage_freearray[
idx].nr_gpages = nr_gpages;
262 gpage_freearray[
idx].gpage_list[nr_gpages] = 0;
272 unsigned long gpage_npages[MMU_PAGE_COUNT];
278 unsigned long npages;
287 if ((
strcmp(param,
"default_hugepagesz") == 0) ||
288 (
strcmp(param,
"hugepagesz") == 0)) {
290 }
else if (
strcmp(param,
"hugepages") == 0) {
292 if (
sscanf(val,
"%lu", &npages) <= 0)
294 gpage_npages[shift_to_mmu_psize(
__ffs(size))] = npages;
309 void __init reserve_hugetlb_gpages(
void)
317 &do_gpage_early_setup);
325 for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
331 size = (
phys_addr_t)(1ULL << mmu_psize_to_shift(i));
333 MEMBLOCK_ALLOC_ANYWHERE);
347 while (number_of_pages > 0) {
348 gpage_freearray[nr_gpages] =
addr;
360 struct huge_bootmem_page *
m;
364 gpage_freearray[nr_gpages] = 0;
365 list_add(&m->list, &huge_boot_pages);
376 #ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * Number of hugepte-table pointers that fit in one page after the
 * struct hugepd_freelist header; used as the batch size at which the
 * per-CPU free list is handed off (see the index == HUGEPD_FREELIST_SIZE
 * check in hugepd_free() below).
 */
377 #define HUGEPD_FREELIST_SIZE \
378 ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
380 struct hugepd_freelist {
386 static DEFINE_PER_CPU(
struct hugepd_freelist *, hugepd_freelist_cur);
388 static void hugepd_free_rcu_callback(
struct rcu_head *
head)
390 struct hugepd_freelist *batch =
394 for (i = 0; i < batch->index; i++)
400 static void hugepd_free(
struct mmu_gather *tlb,
void *hugepte)
402 struct hugepd_freelist **batchp;
407 cpumask_equal(mm_cpumask(tlb->
mm),
413 if (*batchp ==
NULL) {
415 (*batchp)->index = 0;
418 (*batchp)->ptes[(*batchp)->index++] = hugepte;
419 if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
427 unsigned long start,
unsigned long end,
428 unsigned long floor,
unsigned long ceiling)
430 pte_t *hugepte = hugepd_page(*hpdp);
433 unsigned long pdmask = ~((1
UL << pdshift) - 1);
434 unsigned int num_hugepd = 1;
436 #ifdef CONFIG_PPC_FSL_BOOK3E
438 num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
440 unsigned int shift = hugepd_shift(*hpdp);
451 if (end - 1 > ceiling - 1)
454 for (i = 0; i < num_hugepd; i++, hpdp++)
459 #ifdef CONFIG_PPC_FSL_BOOK3E
460 hugepd_free(tlb, hugepte);
462 pgtable_free_tlb(tlb, hugepte, pdshift - shift);
467 unsigned long addr,
unsigned long end,
468 unsigned long floor,
unsigned long ceiling)
480 #ifdef CONFIG_PPC_FSL_BOOK3E
487 next = addr + (1 << hugepd_shift(*(
hugepd_t *)pmd));
490 addr, next, floor, ceiling);
491 }
while (addr = next, addr != end);
501 if (end - 1 > ceiling - 1)
510 unsigned long addr,
unsigned long end,
511 unsigned long floor,
unsigned long ceiling)
522 if (pud_none_or_clear_bad(pud))
524 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
527 #ifdef CONFIG_PPC_FSL_BOOK3E
534 next = addr + (1 << hugepd_shift(*(
hugepd_t *)pud));
537 addr, next, floor, ceiling);
539 }
while (addr = next, addr != end);
549 if (end - 1 > ceiling - 1)
563 unsigned long addr,
unsigned long end,
564 unsigned long floor,
unsigned long ceiling)
587 next = pgd_addr_end(addr, end);
590 if (pgd_none_or_clear_bad(pgd))
592 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
594 #ifdef CONFIG_PPC_FSL_BOOK3E
601 next = addr + (1 << hugepd_shift(*(
hugepd_t *)pgd));
604 addr, next, floor, ceiling);
606 }
while (addr = next, addr != end);
623 mask = (1
UL << shift) - 1;
649 static noinline int gup_hugepte(
pte_t *ptep,
unsigned long sz,
unsigned long addr,
653 unsigned long pte_end;
658 pte_end = (addr + sz) & ~(sz-1);
676 page = head + ((addr & (sz-1)) >>
PAGE_SHIFT);
684 }
while (addr +=
PAGE_SIZE, addr != end);
686 if (!page_cache_add_speculative(head, refs)) {
705 get_huge_page_tail(tail);
/*
 * hugepte_addr_end() - step to the next huge-page boundary, clamped to end.
 *
 * @addr: current virtual address within the walk
 * @end:  exclusive upper bound of the range being walked
 * @sz:   huge page size in bytes; assumed to be a power of two — TODO confirm
 *        against callers (the mask arithmetic below requires it)
 *
 * Returns the smaller of (addr rounded up to the next @sz boundary) and @end.
 *
 * NOTE(review): reconstructed from a garbled extraction — the signature was
 * split mid-parameter-list and the "unsigned long sz" declaration line was
 * elided; the name and type are recovered from the body's use of sz.
 */
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	/* Round addr up to the next sz-aligned boundary. */
	unsigned long boundary = (addr + sz) & ~(sz - 1);

	/*
	 * Compare via "- 1" so the result stays correct even if addr + sz
	 * wraps past the top of the address space (boundary == 0).
	 */
	return (boundary - 1 < end - 1) ? boundary : end;
}
720 unsigned long addr,
unsigned long end,
721 int write,
struct page **pages,
int *nr)
724 unsigned long sz = 1
UL << hugepd_shift(*hugepd);
727 ptep = hugepte_offset(hugepd, addr, pdshift);
729 next = hugepte_addr_end(addr, end, sz);
730 if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
732 }
while (ptep++, addr = next, addr != end);
737 #ifdef CONFIG_PPC_MM_SLICES
739 unsigned long len,
unsigned long pgoff,
751 #ifdef CONFIG_PPC_MM_SLICES
754 return 1
UL << mmu_psize_to_shift(psize);
756 if (!is_vm_hugetlb_page(vma))
763 static inline bool is_power_of_4(
unsigned long x)
766 return (__ilog2(x) % 2) ?
false :
true;
770 static int __init add_huge_page_size(
unsigned long long size)
772 int shift =
__ffs(size);
777 #ifdef CONFIG_PPC_FSL_BOOK3E
778 if ((size <
PAGE_SIZE) || !is_power_of_4(size))
782 || (shift > SLICE_HIGH_SHIFT) || (shift <=
PAGE_SHIFT))
786 if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
789 #ifdef CONFIG_SPU_FS_64K_LS
808 static int __init hugepage_setup_sz(
char *
str)
810 unsigned long long size;
814 if (add_huge_page_size(size) != 0)
819 __setup(
"hugepagesz=", hugepage_setup_sz);
821 #ifdef CONFIG_PPC_FSL_BOOK3E
823 static int __init hugetlbpage_init(
void)
827 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
833 shift = mmu_psize_to_shift(psize);
837 if (add_huge_page_size(1ULL << shift) < 0)
847 if (hugepte_cache ==
NULL)
848 panic(
"%s: Unable to create kmem cache for hugeptes\n",
855 panic(
"%s: Unable to set default huge page size\n", __func__);
861 static int __init hugetlbpage_init(
void)
865 if (!mmu_has_feature(MMU_FTR_16M_PAGE))
868 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
875 shift = mmu_psize_to_shift(psize);
877 if (add_huge_page_size(1ULL << shift) < 0)
889 panic(
"hugetlbpage_init(): could not create "
890 "pgtable cache for %d bit pagesize\n", shift);
911 BUG_ON(!PageCompound(page));
913 for (i = 0; i < (1
UL << compound_order(page)); i++) {
914 if (!PageHighMem(page)) {
918 __flush_dcache_icache(start);