#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
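/* Convert a count of pages to kilobytes: PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024). */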
#define K(x) ((x) << (PAGE_SHIFT-10))
	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
		unsigned long flags, order, total = 0, largest_order = -1;
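		/*
		 * Skip empty zones, then walk the zone's free-page orders,
		 * totalling the free memory and remembering the largest
		 * order that still has free pages.
		 */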
		if (!populated_zone(zone))
		for (order = 0; order < MAX_ORDER; order++) {
				largest_order = order;
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %lukB)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
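	/*
	 * Convert the physical address to a page frame number and let
	 * set_pte_pfn() build and install the PTE for this virtual address.
	 */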
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	unsigned long flags = 0;
#ifdef __PAGETABLE_PMD_FOLDED
	BUG_ON(pgd_addr_invalid(addr));
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
#ifdef __PAGETABLE_PMD_FOLDED
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
static inline void pgd_list_add(pgd_t *pgd)
static inline void pgd_list_del(pgd_t *pgd)
#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
	spin_unlock_irqrestore(&pgd_lock, flags);
	spin_unlock_irqrestore(&pgd_lock, flags);
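/* Number of physical pages making up one user second-level (L2) page table. */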
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
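	/*
	 * The table may span several pages; give each trailing page its own
	 * reference count so every page can be tracked and freed individually.
	 */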
	for (i = 1; i < order; ++i) {
		init_page_count(p+i);
	pgtable_page_ctor(p);
	pgtable_page_dtor(p);
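	/* Free the trailing pages of a multi-page table one by one. */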
	for (i = 1; i < order; ++i) {
	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);
	for (i = 1; i < order; ++i) {
		tlb_remove_page(tlb, pte + i);
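/*
 * The build-time check below guarantees the "accessed" bit sits in the
 * second byte of the PTE, so it can be read and cleared with single byte
 * accesses that leave the rest of the PTE untouched.
 */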
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
	u8 second_byte = tmp[1];
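/*
 * Likewise, the write-protect path assumes the "writable" bit lives in
 * the high 32-bit word of the PTE, which the check below enforces.
 */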
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
	if (pgd_addr_invalid(addr))
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	pte_t null_pte = { 0 };
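/*
 * When the 64-bit PTE is stored as two 32-bit words, the word holding the
 * "present" and "migrating" bits has to be written last, so that a
 * half-written entry is never observed as valid; the check below makes
 * sure both bits fall in the low word.
 */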
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
# error Must write the present and migrating bits last
		unsigned long pfn = pte_pfn(pte);
		} else if (hv_pte_get_mode(pte) == 0) {
			panic("set_pte(): out-of-range PFN and mode 0\n");
static inline int mm_is_priority_cached(struct mm_struct *mm)
	return mm->context.priority_cached != 0;
	if (!mm_is_priority_cached(mm)) {
static unsigned long update_priority_cached(struct mm_struct *mm)
			mm->context.priority_cached = 0;
	return mm->context.priority_cached;
	if (!mm_is_priority_cached(next)) {
	if (mm_is_priority_cached(prev))
	unsigned long offset, last_addr;
	last_addr = phys_addr + size - 1;
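	/* Reject zero-length requests and ranges whose end address wraps around. */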
	if (!size || last_addr < phys_addr)
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));
			       phys_addr, pgprot)) {
		vunmap((void * __force)addr);
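		/* No mapping matches this address; it presumably was not returned by ioremap(). */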
		pr_err("iounmap: bad address %p\n", addr);