#include <linux/types.h>
#include <linux/random.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#define SWITCHER_PGD_INDEX	(PTRS_PER_PGD - 1)
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX	(PTRS_PER_PMD - 1)
#define RESERVE_MEM		2U
#define CHECK_GPGD_MASK		_PAGE_PRESENT
#else
#define RESERVE_MEM		4U
#define CHECK_GPGD_MASK		_PAGE_TABLE
#endif
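/*
 * A note on the numbers above (our reading, not taken from this file):
 * the Switcher occupies exactly one top-level slot's worth of virtual
 * address space.  Without PAE one PGD entry spans 1024 PTEs * 4KB = 4MB;
 * with PAE one PMD entry spans 512 PTEs * 4KB = 2MB.  RESERVE_MEM is
 * that span in megabytes, which the Guest is later told to keep clear.
 */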
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)
#ifndef CONFIG_X86_PAE
	/* In spgd_addr(): any Guest attempt to touch the Switcher's PGD
	 * slot is fatal; kill it and fall back to a safe index. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
#endif
	/* Return a pointer indexed into this shadow page directory. */
	return &cpu->lg->pgdirs[i].pgdir[index];
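/*
 * For reference, the index above comes from the standard x86 helper
 * (defined in the architecture headers, not in this file):
 *
 *	#define pgd_index(address) \
 *		(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 *
 * so the last slot, PTRS_PER_PGD - 1, covers the very top of the
 * virtual address space, which is exactly where the Switcher sits.
 */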
#ifdef CONFIG_X86_PAE
	/* In spmd_addr(): the same protection covers the Switcher's PMD
	 * slot one level down. */
	if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
	    index >= SWITCHER_PMD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
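	/*
	 * A sketch of how the lookup plausibly finishes (reconstruction,
	 * not quoted from this file): the shadow PTE page hangs off the
	 * PMD entry under PAE, and straight off the PGD entry otherwise.
	 */
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid. */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	/* Likewise for the PGD entry without PAE. */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif
	return &page[pte_index(vaddr)];
}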
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}
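/*
 * Note (ours): unlike spgd_addr(), which returns a pointer the Host can
 * dereference directly, gpgd_addr() returns a *Guest-physical* address;
 * callers fetch the actual entry with lgread(cpu, addr, pgd_t), as seen
 * further down.
 */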
#ifdef CONFIG_X86_PAE
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
/* PAE: follow the Guest PMD down to its PTE page. */
static unsigned long gpte_addr(struct lg_cpu *cpu, pmd_t gpmd, unsigned long vaddr)
#else
/* Non-PAE: the Guest PGD points straight at the PTE page. */
static unsigned long gpte_addr(struct lg_cpu *cpu, pgd_t gpgd, unsigned long vaddr)
static unsigned long get_pfn(unsigned long virtpfn, int write)
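/*
 * A sketch of the elided body (reconstruction, not quoted from this
 * file): pin one page of the Launcher's memory and hand back its real
 * frame number, with -1UL as the failure value.
 */
{
	struct page *page;

	/* Pin exactly one user page at this address. */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}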
static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
	/* (condition reconstructed to match its siblings) */
	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page middle directory entry");
}
	unsigned long gpte_ptr;
	/* ... */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
	pmd_t gpmd;
#endif

	/* In demand_page(): look up the shadow top-level entry and fill
	 * it in if it isn't present yet. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* ... allocate a fresh shadow PTE page; on failure: */
		kill_guest(cpu, "out of memory allocating pte page");
		/* ... on success, vet the Guest's entry before using it: */
		check_gpgd(cpu, gpgd);
	}
#ifdef CONFIG_X86_PAE
	/* Under PAE there is a middle level to shadow as well. */
	spmd = spmd_addr(cpu, *spgd, vaddr);
	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
		/* ... allocate a shadow PTE page for it; on failure: */
		kill_guest(cpu, "out of memory allocating pte page");
		/* ... on success, vet the Guest's middle entry: */
		check_gpmd(cpu, gpmd);
	}
	/* ... */

	/* Keep the Guest PTE's address: we may write the entry back. */
	gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
	gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif
	/* The Guest's PTE checks out: build the matching shadow PTE. */
	check_gpte(cpu, gpte);
	/* ... */
	spte = spte_addr(cpu, *spgd, vaddr);
	/* ... */
	/* For a write fault, insist on a writable mapping (the final 1). */
	*spte = gpte_to_spte(cpu, gpte, 1);
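/*
 * The conversion above is the heart of shadowing.  A sketch of
 * gpte_to_spte() (reconstruction, not quoted from this file): swap the
 * Guest's page number for the real frame pinned via get_pfn(), keeping
 * the Guest's flags but never the global bit.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/* The Guest's memory begins at mem_base inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/* Guests don't get the global bit; everything else carries over. */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* Pin the real page backing this Guest page. */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/* A zero PTE is at least harmless to install. */
		flags = 0;
	}
	return pfn_pte(pfn, __pgprot(flags));
}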
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	/* ... */
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif
	/* Walk the shadow tables; only if every level is present can we
	 * read the flags from the final shadow PTE. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	/* ... */
#ifdef CONFIG_X86_PAE
	spmd = spmd_addr(cpu, *spgd, vaddr);
	/* ... */
#endif
	flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));
	/* In pin_page(): make sure the page at vaddr is mapped writably in
	 * the shadow tables, faulting it in if need be.  The errcode 2
	 * passed to demand_page() means "write fault". */
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
	/* ... if the shadow PMD entry is present, release each PTE: */
			release_pte(ptepage[i]);
static void release_pgd(pgd_t *spgd)
{
	/* ... under PAE a PGD entry holds a page of PMDs to release: */
		release_pmd(&pmdpage[i]);
#else /* !CONFIG_X86_PAE */
static void release_pgd(pgd_t *spgd)
{
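	/*
	 * Filled out, the body plausibly reads as below (reconstruction;
	 * only the release_pte() call is quoted from the original).  The
	 * PAE variants above repeat the same pattern one level up.
	 */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

		/* Each PTE may hold a reference on a pinned Guest page. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);

		/* Free the PTE page; zero the entry so we never release
		 * it twice. */
		free_page((long)ptepage);
		*spgd = __pgd(0);
	}
}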
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;

	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg->pgdirs[idx].pgdir + i);
}
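/*
 * Why this is enough (our note): user mappings all live below the
 * Guest's kernel_address, so dropping those shadow PGD slots forces
 * them to be demand-faulted back in, which is exactly what a user TLB
 * flush must guarantee.
 */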
	/* In guest_pagetable_flush_user(): flush the current shadow's user
	 * mappings. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
#ifdef CONFIG_X86_PAE
	pmd_t gpmd;
#endif

	/* In guest_pa(): walk the Guest's own page tables to turn a Guest
	 * virtual address into a Guest physical one. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		/* ... a missing top-level entry is fatal ... */
#ifdef CONFIG_X86_PAE
	/* ... */
	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
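/*
 * guest_pa() then finishes by combining the frame number from the Guest
 * PTE with the offset inside the page, plausibly (reconstruction):
 *
 *	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
 */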
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
	/* ... */
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pagetable)
{
	unsigned int next;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd_table;
#endif

	/* ... pick a victim slot; if it was never allocated, try now: */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have. */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
#ifdef CONFIG_X86_PAE
		/* ... under PAE, a PMD page for the Switcher slot is also
		 * allocated and wired into the new toplevel ... */
#endif
	}
	/* ... */

	/* Record which Guest toplevel this shadow corresponds to. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);
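/*
 * Why <linux/random.h> is included at the top: the elided start of
 * new_pgdir() picks its victim slot at random, something like
 * (reconstruction; the exact helper name varies by kernel version):
 *
 *	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
 *
 * Choosing the least recently used shadow might be better, but random
 * replacement is simple and needs no bookkeeping.
 */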
static void release_all_pagetables(struct lguest *lg)
{
	/* Every shadow pagetable this Guest has: */
		if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
			/* ... find the PMD page behind the last PGD slot and
			 * release its entries, sparing the Switcher's own: */
			for (k = 0; k < SWITCHER_PMD_INDEX; k++)
				release_pmd(&pmdpage[k]);
#endif
			/* ... then every PGD entry except the Switcher's: */
				release_pgd(lg->pgdirs[i].pgdir + j);
		}
	/* In guest_pagetable_clear_all(): throw away every shadow mapping. */
	release_all_pagetables(cpu->lg);
	/* In guest_new_pagetable(): the Guest has switched to a new
	 * top-level page table (a write to CR3, in effect). */
	int newpgdir, repin = 0;
	/* ... */
		release_all_pagetables(cpu->lg);
	/* ... do we already shadow this toplevel? */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/* ... if not, evict a slot and start a blank shadow for it: */
		newpgdir = new_pgdir(cpu, pgtable, &repin);
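/*
 * The elided tail (our reading): cpu->cpu_pgd switches to newpgdir,
 * and if the shadow slot came up blank, repin is set and the Guest's
 * kernel stack pages get pinned back in via pin_stack_pages().
 */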
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow top-level entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* If the top level isn't shadowed, there's nothing to update. */
	/* ... */
#ifdef CONFIG_X86_PAE
	spmd = spmd_addr(cpu, *spgd, vaddr);
	/* ... and the same presence check one level down ... */
#endif
		pte_t *spte = spte_addr(cpu, *spgd, vaddr);
		/* ... a dirty or accessed Guest PTE is worth installing
		 * now, after the usual vetting: */
			check_gpte(cpu, gpte);
			*spte = gpte_to_spte(cpu, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
#ifdef CONFIG_X86_PAE
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels; user
	 * mappings only live in the pgdir that owns them. */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir < ARRAY_SIZE(cpu->lg->pgdirs))
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
	/* In guest_set_pgd(): if we shadow this toplevel, throw away the
	 * affected entry; demand_page() will rebuild it when needed. */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
#ifdef CONFIG_X86_PAE
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
	/* In init_guest_pagetable(): grab a blank shadow slot for the
	 * Guest's first page table. */
	cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
	/* In page_table_guest_data_init(): learn where the Guest thinks
	 * its kernel starts, and tell it how much of the top of memory to
	 * avoid (the Switcher's RESERVE_MEM). */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
	    || put_user(RESERVE_MEM * 1024 * 1024,
			&cpu->lg->lguest_data->reserve_mem)) {
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
#ifdef CONFIG_X86_PAE
	/* Under PAE the Guest kernel must also stay clear of the
	 * Switcher's PMD slot. */
	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
	    pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
	/* In free_guest_pagetable(): tear down every shadow first. */
	release_all_pagetables(lg);
#ifdef CONFIG_X86_PAE
	/* In map_switcher_in_guest(): write this CPU's Switcher PTE page
	 * into the Switcher slot of the current shadow's PMD page. */
	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
static void free_switcher_pte_pages(void)
{
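	/*
	 * A plausible body (reconstruction, not quoted from this file):
	 * one PTE page was allocated per possible CPU, so free them the
	 * same way.
	 */
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}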
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	/* ... map each Switcher page into this CPU's Switcher PTE page: */
	for (i = 0; i < pages; i++) {
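		/*
		 * Inside the loop, each Switcher code page is plausibly
		 * installed read-only, something like (reconstruction;
		 * "pte" here stands for switcher_pte_page(cpu)):
		 *
		 *	set_pte(&pte[i], mk_pte(switcher_page[i],
		 *		__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)));
		 */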
	/* In init_pagetables(): if allocating any per-CPU PTE page fails,
	 * undo the ones already made. */
			free_switcher_pte_pages();
	/* ... each successfully allocated page then gets filled in: */
		populate_switcher_pte_page(i, switcher_page, pages);

	/* And free_pagetables() frees them all again at shutdown: */
	free_switcher_pte_pages();