gaccess.h (Linux Kernel 3.7.1)
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <[email protected]>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

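/*
 * Convert a guest real address to a host user space pointer.  Prefixing
 * is applied first: the first two pages of guest real storage and the
 * two pages at the prefix address are swapped.  The resulting guest
 * absolute address is then translated through the guest address space
 * mapping (gmap).  On failure an error value is returned, to be checked
 * with IS_ERR().
 */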
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

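/*
 * get_guest_u64/u32/u16/u8 read a naturally aligned value from a guest
 * real address.  They return 0 on success, -EFAULT if the host access
 * faults, or the error code from the guest address translation.
 * Unaligned guest addresses (for the multi-byte variants) are a caller
 * bug and trigger BUG_ON().
 */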
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

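/*
 * A (hypothetical) caller in an instruction handler might use the
 * accessors above like this, assuming operand_addr is a 4-byte aligned
 * guest real address:
 *
 *	u32 val;
 *	int rc = get_guest_u32(vcpu, operand_addr, &val);
 *	if (rc)
 *		return rc;
 *
 * The put_guest_u64/u32/u16/u8 helpers below are the mirror image for
 * stores, with the same alignment and return value conventions.
 */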
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

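/*
 * Byte-wise copy to guest real storage.  Going through put_guest_u8()
 * applies prefixing to every single byte, which makes this path correct
 * (but slow) for ranges that cross the prefix boundaries.
 */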
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

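/*
 * Copy to guest storage segment by segment.  gmap_fault() only
 * guarantees a contiguous host mapping within one segment table entry
 * (PMD_SIZE bytes), so the range is split at segment boundaries and
 * each piece is translated separately.  Note that no prefixing is
 * applied here; callers must pass guest absolute addresses or handle
 * prefixing themselves.
 */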
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

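/*
 * Copy to a guest absolute address, i.e. without applying prefixing.
 */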
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

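/*
 * Copy to a guest real address.  If the destination range crosses the
 * low core boundary (2 pages), the start of the prefix area or the end
 * of the prefix area, fall back to the byte-wise slow path, which
 * handles prefixing per byte.  Otherwise prefixing can be applied to
 * the start address once and the fast path used.
 */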
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

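/*
 * Byte-wise copy from guest real storage; the counterpart to
 * __copy_to_guest_slow(), again applying prefixing per byte via
 * get_guest_u8().
 */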
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

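/*
 * Copy from guest storage segment by segment, mirroring
 * __copy_to_guest_fast(): translate at most PMD_SIZE bytes at a time
 * and stop at the first translation or copy error.
 */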
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

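/*
 * Copy from a guest absolute address, i.e. without applying prefixing.
 */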
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

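/*
 * Copy from a guest real address, with the same prefix handling as
 * copy_to_guest().  A (hypothetical) caller fetching instruction text
 * might do, assuming guest_psw_addr holds the guest instruction
 * address:
 *
 *	u8 insn[6];
 *	int rc = copy_from_guest(vcpu, insn, guest_psw_addr, sizeof(insn));
 *	if (rc)
 *		return rc;
 */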
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif /* __KVM_S390_GACCESS_H */