#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

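/*
 * Translate a guest real address into a host user-space pointer. The
 * first two pages of guest memory (the lowcore) are relocated by the
 * CPU's prefix register, so addresses below 2 * PAGE_SIZE and addresses
 * inside the prefix area are swapped before the guest mapping is
 * resolved. On failure an ERR_PTR() value is returned instead.
 */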
static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

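/*
 * Single-value accessors. Each reads one naturally aligned item from
 * guest real memory; the alignment is asserted with BUG_ON. The return
 * value is 0 on success or a negative error code.
 */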
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

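/*
 * The corresponding single-value writers, mirroring the get_guest_*
 * helpers above.
 */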
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

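/*
 * Slow bulk write: one byte at a time through put_guest_u8(), so every
 * byte is prefix-translated individually. Used when a copy range
 * crosses the boundaries of the prefix area.
 */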
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

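/*
 * Fast bulk write to guest memory: translate the destination segment by
 * segment (PMD_SIZE chunks) and copy each chunk with copy_to_user().
 * Callers must ensure the range does not cross a prefix boundary.
 */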
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: destination within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy the first (partial) segment */
	uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestdest & ~PMD_MASK);
	r = copy_to_user(uptr, from, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail */
	if (n) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_to_user(uptr, from, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

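/*
 * Write to guest absolute memory: no prefix adjustment is applied.
 */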
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

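/*
 * Write to guest real memory. Ranges that overlap a prefix boundary
 * take the byte-wise slow path; everything else is prefix-adjusted up
 * front and copied via the fast path.
 */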
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

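/*
 * Slow bulk read: the byte-wise counterpart of __copy_to_guest_slow(),
 * built on get_guest_u8().
 */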
static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

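/*
 * Fast bulk read from guest memory: translate the source segment by
 * segment (PMD_SIZE chunks) and copy each chunk with copy_from_user().
 * Callers must ensure the range does not cross a prefix boundary.
 */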
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: source within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc + n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
		goto out;
	}

	/* copy the first (partial) segment */
	uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);
	size = PMD_SIZE - (guestsrc & ~PMD_MASK);
	r = copy_from_user(to, uptr, size);
	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, PMD_SIZE);
		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail */
	if (n) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);
		r = copy_from_user(to, uptr, n);
		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

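/*
 * Read from guest absolute memory: no prefix adjustment is applied.
 */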
static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

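/*
 * Read from guest real memory. Ranges overlapping a prefix boundary
 * fall back to the byte-wise slow path; all others are prefix-adjusted
 * once and handed to the fast path.
 */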
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

#endif