#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define FRAG_MASK 0x0f
#else
#define FRAG_MASK 0x03
#endif
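/*
 * FRAG_MASK appears to cover one allocation bit per page-table fragment
 * in the low nibble of page->_mapcount.  s390 page tables fill only part
 * of a 4K page (1K on 31-bit, 2K on 64-bit), so a page holds four or two
 * fragments and the mask is 0x0f or 0x03 accordingly.
 */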
        unsigned long *table, *pgd;

        pgd = (unsigned long *) mm->pgd;
        entry = _REGION3_ENTRY_EMPTY;
        entry = _REGION2_ENTRY_EMPTY;
        crst_table_init(table, entry);
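/*
 * The two consecutive assignments to "entry" above appear to come from
 * the two branches of crst_table_upgrade(): _REGION3_ENTRY_EMPTY when
 * growing the address space limit to 4TB (a region-third table becomes
 * the new top level), _REGION2_ENTRY_EMPTY when growing it to 8PB
 * (region-second table).  crst_table_init() then fills the newly
 * allocated top-level table with that empty entry value.
 */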
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)

        switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
        case _REGION_ENTRY_TYPE_R2:
        case _REGION_ENTRY_TYPE_R3:
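/*
 * Each pass of this switch presumably steps the address space down one
 * table level: a region-second entry type at the top means the limit
 * falls from 8PB to a 4TB region-third table, a region-third type drops
 * it to a 2GB segment table, and the old top-level crst table is freed.
 */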
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)

        if (rmap->entry != table)
static void gmap_flush_tlb(struct gmap *gmap)

        __tlb_flush_idte((unsigned long) gmap->table |
                         _ASCE_TYPE_REGION1);
        unsigned long *table;

        __tlb_flush_idte((unsigned long) gmap->table |
                         _ASCE_TYPE_REGION1);
        spin_lock(&gmap->mm->page_table_lock);
        if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                gmap_unlink_segment(gmap, table);
        spin_unlock(&gmap->mm->page_table_lock);
static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
        spin_unlock(&gmap->mm->page_table_lock);
        spin_lock(&gmap->mm->page_table_lock);
        crst_table_init(new, init);
        if (*table & _REGION_ENTRY_INV) {
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
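/*
 * The unlock/alloc/relock pattern in gmap_alloc_table() presumably lets
 * the crst page allocation sleep; dropping mm->page_table_lock is safe
 * because gmap crst tables are only freed in gmap_free(), and the new
 * table is linked in only if the entry is still invalid after the lock
 * is retaken (otherwise another cpu raced in first).
 */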
        unsigned long *table;

        if (len == 0 || to + len < to)
                return -EINVAL;

        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
                flush |= gmap_unlink_segment(gmap, table);
        }
out:
        spin_unlock(&gmap->mm->page_table_lock);
        if (flush)
                gmap_flush_tlb(gmap);
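/*
 * The repeated shifts above walk the guest address space the way the
 * hardware would: the gmap uses a full four-level layout, so bits 53,
 * 42, 31 and 20 of the guest address index the region-first,
 * region-second, region-third and segment tables, each holding 2048
 * entries (hence the & 0x7ff).  PMD_SIZE is the 1MB segment size on
 * s390, so the loop advances one segment entry per iteration.
 */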
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)

        unsigned long *table;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 ||
            from + len < from || to + len < to)
                return -EINVAL;

        spin_lock(&gmap->mm->page_table_lock);
        for (off = 0; off < len; off += PMD_SIZE) {
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);
                flush |= gmap_unlink_segment(gmap, table);
        }
        spin_unlock(&gmap->mm->page_table_lock);
        if (flush)
                gmap_flush_tlb(gmap);

out_unmap:
        spin_unlock(&gmap->mm->page_table_lock);
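/*
 * gmap_map_segment() seems to differ from the unmap path only in that
 * missing intermediate tables are allocated on demand via
 * gmap_alloc_table(), and the final segment entry is written as an
 * invalid, read-only entry carrying the parent ("from") address, so a
 * later gmap fault can resolve it to a real page table.
 */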
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {

        return vmaddr | (address & ~PMD_MASK);
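/*
 * This appears to be the address translation step of a gmap fault: a
 * valid segment entry points to a parent page table whose struct page
 * ->index carries a struct gmap_pgtable, and the host address is that
 * table's vmaddr combined with the offset within the 1MB segment
 * (address & ~PMD_MASK).
 */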
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)

void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
        while (address < to) {
                table = gmap->table + ((address >> 53) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 42) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 31) & 0x7ff);
                if (unlikely(*table & _REGION_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + ((address >> 20) & 0x7ff);
                if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)

        unsigned long *table;

        pgtable_page_ctor(page);
        INIT_LIST_HEAD(&mp->mapper);
static inline void page_table_free_pgste(unsigned long *table)

        pgtable_page_dtor(page);

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)

static inline void page_table_free_pgste(unsigned long *table)

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)

#endif /* CONFIG_PGSTE */
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)

        unsigned int old, new;
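/*
 * The elided body of atomic_xor_bits() presumably follows the usual
 * atomic_cmpxchg() retry pattern, roughly:
 *
 *      do {
 *              old = atomic_read(v);
 *              new = old ^ bits;
 *      } while (atomic_cmpxchg(v, old, new) != old);
 *      return new;
 *
 * i.e. it atomically x-ors @bits into *v and returns the new value,
 * which is how the fragment bitmap in page->_mapcount is updated below.
 */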
        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);

        mask = mask | (mask >> 4);

        pgtable_page_ctor(page);

        for (bit = 1; mask & bit; bit <<= 1)
                table += PTRS_PER_PTE;
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if ((mask & FRAG_MASK) == FRAG_MASK)
                list_del(&page->lru);
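/*
 * In page_table_alloc() the low nibble of page->_mapcount seems to track
 * which 1K/2K fragments of the page are in use and the high nibble which
 * ones are still pending an RCU free; "mask | (mask >> 4)" therefore
 * treats pending fragments as busy, the for loop picks the first clear
 * bit, and a page whose mask reaches FRAG_MASK is taken off the per-mm
 * pgtable_list because it is full.
 */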
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }

        mask = atomic_xor_bits(&page->_mapcount, bit);

        pgtable_page_dtor(page);
static void __page_table_free_rcu(void *table, unsigned bit)

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);

        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);

        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));

        table = (unsigned long *) (__pa(table) | (bit << 4));
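/*
 * For the RCU free path the fragment bit is apparently smuggled in the
 * low bits of the page-aligned table address: page_table_free_rcu()
 * passes __pa(table) | (bit << 4) to tlb_remove_table(), and
 * __tlb_remove_table() below masks those bits back out to decide whether
 * to free a pte fragment or a full crst table.
 */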
        void *table = (void *)((unsigned long) _table & ~mask);
        unsigned type = (unsigned long) _table & mask;

        __page_table_free_rcu(table, type);
static void tlb_remove_table_smp_sync(void *arg)

static void tlb_remove_table_one(void *table)
        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);
        __tlb_flush_mm(tlb->mm);
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)
                        __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        __tlb_flush_mm(tlb->mm);
                        tlb_remove_table_one(table);
        (*batch)->tables[(*batch)->nr++] = table;
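/*
 * tlb_remove_table() batches table pointers in a page-sized
 * mmu_table_batch that is freed after an RCU grace period.  If no batch
 * page can be allocated, the code above appears to fall back to
 * tlb_remove_table_one(), which synchronizes with concurrent lockless
 * walkers by an smp_call_function() round trip before freeing the table
 * immediately.
 */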
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

        while (vma != NULL) {
        if (mm_has_pgste(tsk->mm))
                return 0;
            !hlist_empty(&tsk->mm->ioctx_list) ||
        tsk->mm->context.alloc_pgste = 1;
        sync_mm_rss(tsk->mm);
        tsk->mm->context.alloc_pgste = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

            !hlist_empty(&tsk->mm->ioctx_list) ||
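/*
 * The s390_enable_sie() fragments above presumably implement the mm
 * replacement trick: context.alloc_pgste is set so that dup_mm() builds
 * page tables with PGSTEs, the new mm is swapped in for the task, and
 * the hlist_empty()/mm_users checks reject callers with threads or
 * outstanding AIO contexts for which the swap would be unsafe.
 */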
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
                : "=d" (cc), "+a" (addr) : : "cc");
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)

        if (pmd_same(*pmdp, entry))
                return 0;
        pmdp_invalidate(vma, address, pmdp);
static void pmdp_splitting_flush_sync(void *arg)

        if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
                              (unsigned long *) pmdp)) {
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) mm->pmd_huge_pte);
        mm->pmd_huge_pte = pgtable;
        pgtable = mm->pmd_huge_pte;

        mm->pmd_huge_pte = NULL;

        ptep = (pte_t *) pgtable;
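/*
 * The THP deposit/withdraw fragments keep preallocated page tables on a
 * FIFO threaded through the pgtable pages themselves: the first pte
 * slots of a deposited table are reused as a struct list_head, and
 * pgtable_trans_huge_withdraw() apparently casts the page back to a
 * pte_t pointer afterwards so the reused slots can be reset to empty
 * pte values before the table is handed out again.
 */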