20 #include <asm/uaccess.h>
/*
 * Fragment of the PTEG-address helper: computes the guest-physical
 * address of the hashed-page-table entry group (PTEG) selected by
 * pte_index, from the vcpu's SDR1 value.
 * NOTE(review): the function signature and some interior lines are
 * elided from this excerpt.
 */
27 unsigned long pteg_addr;

/*
 * Clamp pte_index to the table size: the low 5 bits of SDR1
 * (presumably the HTABSIZE field per the Power ISA SDR1 layout —
 * confirm) give (1 << (HTABSIZE + 11)) groups; each group is
 * 128 (= 1 << 7) bytes, and 0x70 keeps the intra-group offset bits.
 */
30 pte_index &= ((1 << ((vcpu_book3s->
sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
/* Table origin: upper bits of SDR1 (presumably HTABORG — confirm). */
31 pteg_addr = vcpu_book3s->
sdr1 & 0xfffffffffffc0000ULL;
/*
 * Fragment of the H_ENTER hypercall emulation: inserts a new HPTE
 * into the guest's chosen page-table entry group.
 * NOTE(review): the signature, the read of the current PTEG from
 * guest memory, and the free-slot search are elided from this excerpt.
 */
/* H_ENTER flags argument arrives in r4 (PAPR calling convention). */
39 long flags = kvmppc_get_gpr(vcpu, 4);

/* One PTEG: 8 HPTEs of two unsigned longs each. */
41 unsigned long pteg[2 * 8];
42 unsigned long pteg_addr,
i, *hpte;

45 pteg_addr = get_pteg_addr(vcpu, pte_index);

/* Common case: no H_EXACT, so we may pick any free slot (elided). */
50 if (
likely((flags & H_EXACT) == 0)) {
/* H_EXACT: caller demands the exact slot given in bits 0-2 of r5. */
60 i = kvmppc_get_gpr(vcpu, 5) & 7
UL;

/* The new HPTE doubleword pair comes in r6/r7. */
64 hpte[0] = kvmppc_get_gpr(vcpu, 6);
65 hpte[1] = kvmppc_get_gpr(vcpu, 7);
/*
 * Write the updated PTEG back to guest memory.
 * NOTE(review): copy_to_user()'s return value is ignored here, so a
 * faulting guest address is silently dropped — worth flagging.
 */
66 copy_to_user((
void __user *)pteg_addr, pteg,
sizeof(pteg));
/* Success: r3 = H_SUCCESS, r4 = index of the slot actually used. */
67 kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
68 kvmppc_set_gpr(vcpu, 4, pte_index | i);
/*
 * H_REMOVE hypercall emulation: invalidate one guest HPTE and flush
 * its translation.
 * NOTE(review): interior lines — the copy of the HPTE from guest
 * memory and the HPTE_V_VALID check — are elided from this excerpt.
 */
73 static int kvmppc_h_pr_remove(
struct kvm_vcpu *vcpu)
/* PAPR arguments: flags (r4), pte_index (r5), avpn (r6). */
75 unsigned long flags= kvmppc_get_gpr(vcpu, 4);
76 unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
77 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
78 unsigned long v = 0, pteg,
rb;

81 pteg = get_pteg_addr(vcpu, pte_index);

/* Not found if the AVPN doesn't match, or an ANDCOND bit is set. */
85 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
86 ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
87 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);

/*
 * Flush the stale translation through the shadow MMU.
 * NOTE(review): third tlbie arg is derived from rb bit 0 —
 * presumably a large-page indicator; confirm against the mmu op.
 */
93 rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
94 vcpu->
arch.mmu.tlbie(vcpu, rb, rb & 1 ?
true :
false);

/* Success: return the removed HPTE doubleword pair in r4/r5. */
96 kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
97 kvmppc_set_gpr(vcpu, 4, pte[0]);
98 kvmppc_set_gpr(vcpu, 5, pte[1]);
/*
 * H_BULK_REMOVE translation-specifier field layout (presumably per
 * the PAPR/LoPAPR specification — confirm against the architecture
 * document).  Each 64-bit specifier packs a type, a return code, a
 * flags variant, and the PTE index.
 */
/* Bits 0-1: entry type, and its three defined values. */
104 #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
105 #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
106 #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
107 #define H_BULK_REMOVE_END 0xc000000000000000ULL
/* Bits 2-3: per-entry completion code, and its four values. */
108 #define H_BULK_REMOVE_CODE 0x3000000000000000ULL
109 #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
110 #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
111 #define H_BULK_REMOVE_PARM 0x2000000000000000ULL
112 #define H_BULK_REMOVE_HW 0x3000000000000000ULL
/* Bits 4-5: reference/change (RC) field. */
113 #define H_BULK_REMOVE_RC 0x0c00000000000000ULL
/* Bits 6-7: match-flags variant, and its three values. */
114 #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
115 #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
116 #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
117 #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
/* Low 56 bits: the PTE index this specifier targets. */
118 #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
/* Maximum number of specifiers processed per hypercall. */
119 #define H_BULK_REMOVE_MAX_BATCH 4
/*
 * H_BULK_REMOVE hypercall emulation: process a batch of translation
 * specifiers, each a (tsh, tsl) register pair starting at gpr
 * 'paramnr', removing the HPTE each one names.
 * NOTE(review): the loop header, 'paramnr'/'ret' initialization and
 * several interior lines are elided from this excerpt.
 */
121 static int kvmppc_h_pr_bulk_remove(
struct kvm_vcpu *vcpu)
/* i-th specifier: high doubleword (type/code/flags/ptex) and low. */
128 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
129 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);

131 unsigned long pte[2];

136 }
/* Entries that are not REQUEST type end or fault the batch (elided). */
else if ((tsh & H_BULK_REMOVE_TYPE) !=
/* Write the updated specifier (with response code) back to its gpr. */
148 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);

/* Not found: invalid HPTE, AVPN mismatch, or ANDCOND bit set. */
159 if ((pte[0] & HPTE_V_VALID) == 0 ||
160 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
161 ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {

/*
 * Flush the removed translation; same tlbie pattern as H_REMOVE
 * (rb bit 0 selects the third arg — presumably large-page; confirm).
 */
167 rb = compute_tlbie_rb(pte[0], pte[1],
168 tsh & H_BULK_REMOVE_PTEX);
169 vcpu->
arch.mmu.tlbie(vcpu, rb, rb & 1 ?
true :
false);

173 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);

/* Overall hypercall status is returned in r3. */
175 kvmppc_set_gpr(vcpu, 3, ret);
/*
 * H_PROTECT hypercall emulation: change the protection bits of an
 * existing guest HPTE, then flush its translation.
 * NOTE(review): the HPTE read from guest memory and the computation
 * of the new v/r doublewords are elided from this excerpt.
 */
180 static int kvmppc_h_pr_protect(
struct kvm_vcpu *vcpu)
/* PAPR arguments: flags (r4), pte_index (r5), avpn (r6). */
182 unsigned long flags = kvmppc_get_gpr(vcpu, 4);
183 unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
184 unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
185 unsigned long rb, pteg,
r,
v;
186 unsigned long pte[2];

188 pteg = get_pteg_addr(vcpu, pte_index);

/* Not found if the HPTE is invalid or the supplied AVPN mismatches. */
191 if ((pte[0] & HPTE_V_VALID) == 0 ||
192 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
193 kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);

/* Flush the old translation so the new protection takes effect. */
207 rb = compute_tlbie_rb(v, r, pte_index);
208 vcpu->
arch.mmu.tlbie(vcpu, rb, rb & 1 ?
true :
false);

211 kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
/*
 * H_PUT_TCE hypercall emulation: store one TCE (I/O translation)
 * entry.  Arguments: liobn (r4), ioba (r5), tce value (r6).
 * NOTE(review): the call that produces 'rc' and the H_TOO_HARD
 * return path body are elided from this excerpt.
 */
216 static int kvmppc_h_pr_put_tce(
struct kvm_vcpu *vcpu)
218 unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
219 unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
220 unsigned long tce = kvmppc_get_gpr(vcpu, 6);

/* H_TOO_HARD: defer — presumably punted to another handler (elided). */
224 if (rc == H_TOO_HARD)
/* Otherwise report the result to the guest in r3. */
226 kvmppc_set_gpr(vcpu, 3, rc);
/*
 * Fragment of the top-level PAPR hypercall dispatcher: each line is
 * the tail of a switch case routing one hcall number to its handler.
 * NOTE(review): the switch header and case labels are elided.
 */
234 return kvmppc_h_pr_enter(vcpu);
236 return kvmppc_h_pr_remove(vcpu);
238 return kvmppc_h_pr_protect(vcpu);
240 return kvmppc_h_pr_bulk_remove(vcpu);
242 return kvmppc_h_pr_put_tce(vcpu);

/* Bump the vcpu halt-wakeup statistic — presumably on the H_CEDE
 * wake path; confirm against the elided surrounding case. */
247 vcpu->
stat.halt_wakeup++;