#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
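/*
 * Hypercalls are issued by calling into the hypercall page: a page of
 * 32-byte stubs that Xen populates for the guest.  Each hypercall
 * number has its own stub, so a hypercall is a direct call to a fixed
 * offset within that page.
 */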
extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x) \
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
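/*
 * The hypercall argument registers differ between 32-bit guests
 * (arguments in ebx, ecx, edx, esi, edi; result in eax) and 64-bit
 * guests (arguments in rdi, rsi, rdx, r10, r8; result in rax).
 */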
#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif
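/*
 * __HYPERCALL_DECLS pins each argument to the register the ABI expects
 * via explicit-register variables; the "= __argN" self-initialisation
 * only exists to silence "used uninitialised" warnings for the
 * argument registers a given call does not actually set.
 */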
#define __HYPERCALL_DECLS \
	register unsigned long __res  asm(__HYPERCALL_RETREG); \
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
#define __HYPERCALL_0PARAM	"=r" (__res)
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)
#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1) \
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2) \
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3) \
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);
#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
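/*
 * _hypercallN() expands to a statement expression that loads N
 * arguments into the fixed registers, calls the stub for 'name' and
 * yields the result cast to 'type'.  The unused argument registers are
 * listed as clobbers so the compiler does not assume they survive the
 * hypercall.
 */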
#define _hypercall0(type, name) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_0ARG(); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_0PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER0); \
	(type)__res; \
})
#define _hypercall1(type, name, a1) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_1ARG(a1); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_1PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER1); \
	(type)__res; \
})
#define _hypercall2(type, name, a1, a2) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_2ARG(a1, a2); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_2PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER2); \
	(type)__res; \
})
#define _hypercall3(type, name, a1, a2, a3) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_3ARG(a1, a2, a3); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_3PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER3); \
	(type)__res; \
})
#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_4ARG(a1, a2, a3, a4); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_4PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER4); \
	(type)__res; \
})
#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
	__HYPERCALL_DECLS; \
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
	asm volatile (__HYPERCALL \
		      : __HYPERCALL_5PARAM \
		      : __HYPERCALL_ENTRY(name) \
		      : __HYPERCALL_CLOBBER5); \
	(type)__res; \
})
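/*
 * privcmd_call() is the indirect variant used when the hypercall
 * number is only known at run time (e.g. requests forwarded by the
 * privcmd driver): the stub address is computed from 'call' instead of
 * being a fixed offset into the hypercall page.
 */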
static inline long
privcmd_call(unsigned call, unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4, unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
	asm volatile("call *%[call]"
		     : __HYPERCALL_5PARAM
		     : [call] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);
	return (long)__res;
}
static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	return _hypercall2(int, stack_switch, ss, esp);
}
#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
			 unsigned long event_address,
			 unsigned long failsafe_selector,
			 unsigned long failsafe_address)
{
	return _hypercall4(int, set_callbacks,
			   event_selector, event_address,
			   failsafe_selector, failsafe_address);
}
#else	/* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
			 unsigned long failsafe_address,
			 unsigned long syscall_address)
{
	return _hypercall3(int, set_callbacks,
			   event_address, failsafe_address,
			   syscall_address);
}
#endif
static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	return _hypercall1(int, fpu_taskswitch, set);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout >> 32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}
static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
	mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
	return _hypercall1(int, mca, mc_op);
}
static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}
static inline int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}
static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}
static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}
static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
					 unsigned long flags, domid_t domid)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall4(int, update_va_mapping_otherdomain, va,
				   new_val.pte, flags, domid);
	else
		return _hypercall5(int, update_va_mapping_otherdomain, va,
				   new_val.pte, new_val.pte >> 32,
				   flags, domid);
}
static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif
static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
	return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}
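/*
 * The MULTI_* helpers each fill in one struct multicall_entry, so that
 * several hypercalls can be batched and issued together with a single
 * HYPERVISOR_multicall().  trace_xen_mc_entry() records the entry and
 * its argument count for the xen tracepoints.
 */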
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}
static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
		     void *uop, unsigned int count)
{
	mcl->op = __HYPERVISOR_grant_table_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)uop;
	mcl->args[2] = count;

	trace_xen_mc_entry(mcl, 3);
}
static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl,
				    unsigned long va, pte_t new_val,
				    unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
		mcl->args[3] = domid;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
		mcl->args[4] = domid;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}
static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = desc.a;
		mcl->args[3] = desc.b;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}
static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
	mcl->op = __HYPERVISOR_memory_op;
	mcl->args[0] = cmd;
	mcl->args[1] = (unsigned long)arg;

	trace_xen_mc_entry(mcl, 2);
}
static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}
static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
	mcl->op = __HYPERVISOR_set_gdt;
	mcl->args[0] = (unsigned long)frames;
	mcl->args[1] = entries;

	trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}