#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif
#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif
#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
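/*
 * Dropping a shadow PTE: after the entry is cleared in the host hash
 * table, the architected sync/tlbie/sync/tlbsync sequence below evicts
 * any stale translation for the guest effective address from the TLB.
 */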
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
		   gvsid, map->host_vsid);
	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
		   gvsid, map->host_vsid);
	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	hash = ((vsid ^ page) << 6);
	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);
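/*
 * kvmppc_mmu_map_page() enters one guest translation into the host
 * hash table: resolve the host page, translate the guest segment to a
 * shadow VSID, then walk the 8 slots of a PTEG for a free entry,
 * flipping to the other hash function and evicting round-robin once
 * every slot is taken.
 */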
	bool primary = false;
	if (is_error_pfn(hpaddr)) {
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);
	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
	asm volatile ("sync");
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, vpn,
		    orig_pte->vpage, hpaddr);
	pte->pte = *orig_pte;
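/*
 * create_sid_map() allocates a shadow VSID for a guest VSID.  The
 * backwards_map toggle alternates between the two candidate sid_map
 * slots on collisions, and an exhausted VSID pool is recycled wholesale
 * after flushing all shadow state.
 */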
	static int backwards_map = 0;
	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	backwards_map = !backwards_map;
	vcpu_book3s->vsid_next = 0;
	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
	vcpu_book3s->vsid_next++;
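/*
 * kvmppc_mmu_map_segment() loads the host segment register for a guest
 * effective address, creating the guest->shadow VSID mapping on first
 * use.
 */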
	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
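/*
 * Same VSID layout as mm/mmu_context_hash32.c: every MMU context yields
 * 16 VSIDs, one per 256 MB segment.  E.g. context 1 maps to
 * (1 * 14352) & 0xffffff = 0x3810 for segment 0, and segment 1 adds
 * 0x111, giving 0x3921.
 */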
	for (j = 0; j < 16; j++)
		vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
	vcpu3s->vsid_next = 0;
	/* Remember where the HTAB is */
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);
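	/*
	 * SDR1 carries the hash table's physical base in its upper bits
	 * and the table size mask (HTABMASK) in its low 9 bits; htabmask
	 * as built here masks the PTEG byte offset within that table.
	 */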
	for (j = 0; j < i; j++) {