Linux Kernel 3.7.1
book3s_64_mmu_host.c
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 *     Kevin Wolf <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace.h"

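/* Guest pages are always backed by 4K HPTEs, hence a 12-bit page shift. */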
#define PTE_SIZE 12

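/*
 * Drop a shadow HPTE from the host hash table via the machdep hook.
 * The trailing false is the "local" flag, so the invalidate is
 * broadcast rather than limited to the current CPU.
 */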
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
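/*
 * The index is an XOR fold of the 64-bit guest VSID taken in
 * SID_MAP_BITS-wide slices. With SID_MAP_BITS == 9 (512 entries),
 * e.g. gvsid 0x1234 hashes to (0x1234 & 0x1ff) ^ (0x1234 >> 9) == 0x3d.
 */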
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

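/*
 * Look up a guest VSID in the sid_map. Each gvsid can live in one of
 * two slots, the hashed index or its mirror (SID_MAP_MASK - hash),
 * matching the alternating placement done by create_sid_map() below.
 */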
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

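/*
 * Map a guest page into the host hash page table: resolve the guest
 * real address to a host pfn, find (or create) a shadow VSID for the
 * segment, then insert a 4K HPTE via ppc_md.hpte_insert().
 */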
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	unsigned long vpn;
	pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
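	/*
	 * 0x192 == HPTE_R_R | HPTE_R_C | HPTE_R_M | 0x2: referenced and
	 * changed pre-set, memory-coherent, pp = 0b10 (read/write).
	 * OR-ing in HPTE_R_PP below flips pp to 0b11 (read-only).
	 */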
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;

	/* Get host physical address for gpa */
	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr <<= PAGE_SHIFT;
	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vsid = map->host_vsid;
	vpn = hpt_vpn(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

	hash = hpt_hash(vpn, PTE_SIZE, MMU_SEGSIZE_256M);

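	/*
	 * HPTE insertion protocol: try the primary group first; on
	 * failure flip to the secondary group (hash = ~hash,
	 * HPTE_V_SECONDARY); if both are full, evict an entry with
	 * ppc_md.hpte_remove() and try again.
	 */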
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);

		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_vpn = vpn;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;

		kvmppc_mmu_hpte_cache_map(vcpu, pte);
	}

out:
	return r;
}

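/*
 * Allocate a fresh host (shadow) VSID for a guest VSID and enter it
 * into the sid_map, alternating between the two candidate slots that
 * find_sid_vsid() probes. When the proto-VSID space is exhausted, all
 * shadow state is flushed and numbering restarts.
 */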
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.shared->msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
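	/*
	 * vsid_scramble() is the host kernel's proto-VSID -> VSID hash;
	 * the bare "256M" token is pasted by the macro to pick the
	 * 256M-segment multiplier and modulus (see asm/mmu-hash64.h).
	 */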
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

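/*
 * Pick a shadow SLB slot for this ESID: reuse a matching or previously
 * invalidated entry when possible, otherwise grow slb_max. Slot 0 is
 * never handed out for guest segments, so the search starts at 1.
 */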
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!svcpu->slb_max)
		svcpu->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

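/*
 * Enter a shadow SLB mapping for the segment containing eaddr:
 * translate the guest ESID to a guest VSID, map that to a host VSID,
 * and build the esid/vsid dwords for the shadow SLB entry.
 */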
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

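	/*
	 * The host VSID is shifted into position for the slbmte vsid
	 * dword (bit 12, i.e. SLB_VSID_SHIFT for 256M segments) and the
	 * chosen slot index is folded into the esid dword.
	 */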
	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
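
/*
 * Reset the shadow SLB to "empty": only slot 0, which is never used
 * for guest segments, remains, with its ESID zeroed.
 */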
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 1;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

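/*
 * Tear down this vcpu's shadow MMU: free the cached shadow HPTEs and
 * release the host MMU context allocated by kvmppc_mmu_init().
 */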
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

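/*
 * Reserve a host MMU context for this vcpu; its context id seeds the
 * proto-VSID range that create_sid_map() consumes. Assuming
 * USER_ESID_BITS == 16, context id N covers proto-VSIDs
 * N << 16 .. ((N + 1) << 16) - 1, i.e. roughly 64K shadow segments
 * before a full flush is forced.
 */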
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
				  << USER_ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}