Linux Kernel 3.7.1
arch/x86/include/asm/kvm_emulate.h
/******************************************************************************
 * x86_emulate.h
 *
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#ifndef _ASM_X86_KVM_X86_EMULATE_H
#define _ASM_X86_KVM_X86_EMULATE_H

#include <asm/desc_defs.h>

struct x86_emulate_ctxt;
enum x86_intercept;
enum x86_intercept_stage;

struct x86_exception {
        u8 vector;
        bool error_code_valid;
        u16 error_code;
        bool nested_page_fault;
        u64 address;            /* cr2 or nested page fault gpa */
};
/*
 * This struct is used to carry enough information from the instruction
 * decoder to main KVM so that a decision can be made whether the
 * instruction needs to be intercepted or not.
 */
struct x86_instruction_info {
        u8  intercept;          /* which intercept */
        u8  rep_prefix;         /* rep prefix? */
        u8  modrm_mod;          /* mod part of modrm */
        u8  modrm_reg;          /* index of register used */
        u8  modrm_rm;           /* rm part of modrm */
        u64 src_val;            /* value of source operand */
        u8  src_bytes;          /* size of source operand */
        u8  dst_bytes;          /* size of destination operand */
        u8  ad_bytes;           /* size of src/dst address */
        u64 next_rip;           /* rip following the instruction */
};

/*
 * x86_emulate_ops:
 *
 * These operations represent the instruction emulator's interface to memory.
 * There are two categories of operation: those that act on ordinary memory
 * regions (*_std), and those that act on memory regions known to require
 * special treatment or emulation (*_emulated).
 *
 * The emulator assumes that an instruction accesses only one 'emulated memory'
 * location, that this location is the given linear faulting address (cr2), and
 * that this is one of the instruction's data operands. Instruction fetches and
 * stack operations are assumed never to access emulated memory. The emulator
 * automatically deduces which operand of a string-move operation is accessing
 * emulated memory, and assumes that the other operand accesses normal memory.
 *
 * NOTES:
 *  1. The emulator isn't very smart about emulated vs. standard memory.
 *     'Emulated memory' access addresses should be checked for sanity.
 *     'Normal memory' accesses may fault, and the caller must arrange to
 *     detect and handle reentrancy into the emulator via recursive faults.
 *     Accesses may be unaligned and may cross page boundaries.
 *  2. If the access fails (cannot emulate, or a standard access faults) then
 *     it is up to the memop to propagate the fault to the guest VM via
 *     some out-of-band mechanism, unknown to the emulator. The memop signals
 *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
 *     then immediately bail.
 *  3. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
 *     cmpxchg8b_emulated needs to support 8-byte accesses.
 *  4. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
 */
/* Access completed successfully: continue emulation as normal. */
#define X86EMUL_CONTINUE        0
/* Access is unhandleable: bail from emulation and return error to caller. */
#define X86EMUL_UNHANDLEABLE    1
/* Terminate emulation but return success to the caller. */
#define X86EMUL_PROPAGATE_FAULT 2       /* propagate a generated fault to guest */
#define X86EMUL_RETRY_INSTR     3       /* retry the instruction for some reason */
#define X86EMUL_CMPXCHG_FAILED  4       /* cmpxchg did not see expected value */
#define X86EMUL_IO_NEEDED       5       /* IO is needed to complete emulation */
#define X86EMUL_INTERCEPTED     6       /* Intercepted by nested VMCB/VMCS */
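To make the fault-propagation rule in note 2 concrete, here is a minimal sketch of a host-provided accessor. It is not part of this header: the callback name and the demo_mmio_read() helper are hypothetical; only struct x86_exception, the X86EMUL_* return codes, and the read_emulated signature (declared in struct x86_emulate_ops below) come from the file itself.

/* Hypothetical emulated-memory read callback (illustration only). */
static int demo_read_emulated(struct x86_emulate_ctxt *ctxt,
                              unsigned long addr, void *val,
                              unsigned int bytes,
                              struct x86_exception *fault)
{
        /* demo_mmio_read() is an assumed host helper, not a kernel API. */
        if (demo_mmio_read(addr, val, bytes) == 0)
                return X86EMUL_CONTINUE;        /* access handled, keep emulating */

        /* On failure, describe the fault and let the emulator bail out. */
        fault->vector = 14;                     /* #PF */
        fault->error_code_valid = true;
        fault->error_code = 0;
        fault->nested_page_fault = false;
        fault->address = addr;                  /* cr2-style faulting address */
        return X86EMUL_PROPAGATE_FAULT;
}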
struct x86_emulate_ops {
        /*
         * read_gpr: read a general purpose register (rax - r15)
         *
         * @reg: gpr number.
         */
        ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
        /*
         * write_gpr: write a general purpose register (rax - r15)
         *
         * @reg: gpr number.
         * @val: value to write.
         */
        void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
        /*
         * read_std: Read bytes of standard (non-emulated/special) memory.
         *           Used for descriptor reading.
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_std)(struct x86_emulate_ctxt *ctxt,
                        unsigned long addr, void *val,
                        unsigned int bytes,
                        struct x86_exception *fault);

        /*
         * write_std: Write bytes of standard (non-emulated/special) memory.
         *            Used for descriptor writing.
         *  @addr:  [IN ] Linear address to which to write.
         *  @val:   [IN ] Value to write to memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
        int (*write_std)(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *val, unsigned int bytes,
                         struct x86_exception *fault);
        /*
         * fetch: Read bytes of standard (non-emulated/special) memory.
         *        Used for instruction fetch.
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*fetch)(struct x86_emulate_ctxt *ctxt,
                     unsigned long addr, void *val, unsigned int bytes,
                     struct x86_exception *fault);

        /*
         * read_emulated: Read bytes from emulated/special memory area.
         *  @addr:  [IN ] Linear address from which to read.
         *  @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
         *  @bytes: [IN ] Number of bytes to read from memory.
         */
        int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
                             unsigned long addr, void *val, unsigned int bytes,
                             struct x86_exception *fault);

        /*
         * write_emulated: Write bytes to emulated/special memory area.
         *  @addr:  [IN ] Linear address to which to write.
         *  @val:   [IN ] Value to write to memory (low-order bytes used as
         *                required).
         *  @bytes: [IN ] Number of bytes to write to memory.
         */
        int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
                              unsigned long addr, const void *val,
                              unsigned int bytes,
                              struct x86_exception *fault);

        /*
         * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
         *                   emulated/special memory area.
         *  @addr:  [IN ] Linear address to access.
         *  @old:   [IN ] Value expected to be current at @addr.
         *  @new:   [IN ] Value to write to @addr.
         *  @bytes: [IN ] Number of bytes to access using CMPXCHG.
         */
        int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
                                unsigned long addr,
                                const void *old,
                                const void *new,
                                unsigned int bytes,
                                struct x86_exception *fault);
        void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr);

        int (*pio_in_emulated)(struct x86_emulate_ctxt *ctxt,
                               int size, unsigned short port, void *val,
                               unsigned int count);

        int (*pio_out_emulated)(struct x86_emulate_ctxt *ctxt,
                                int size, unsigned short port, const void *val,
                                unsigned int count);

        bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
                            struct desc_struct *desc, u32 *base3, int seg);
        void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
                            struct desc_struct *desc, u32 base3, int seg);
        unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
                                                 int seg);
        void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
        void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
        void (*set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
        void (*set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
        ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
        int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
        void (*set_rflags)(struct x86_emulate_ctxt *ctxt, ulong val);
        int (*cpl)(struct x86_emulate_ctxt *ctxt);
        int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
        int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
        int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
        void (*get_fpu)(struct x86_emulate_ctxt *ctxt);  /* disables preempt */
        void (*put_fpu)(struct x86_emulate_ctxt *ctxt);  /* reenables preempt */
        int (*intercept)(struct x86_emulate_ctxt *ctxt,
                         struct x86_instruction_info *info,
                         enum x86_intercept_stage stage);

        void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
                          u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
};
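As a usage illustration (not taken from KVM), the host embedding the emulator supplies one of these tables and wires each hook to its own state. The sketch below backs the two GPR hooks with a plain array; everything named demo_* is an assumption, and a real table must populate every hook the emulated code can reach.

/* Purely illustrative backing store for the GPR callbacks. */
static unsigned long demo_gprs[16];

static ulong demo_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
        return demo_gprs[reg & 15];
}

static void demo_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
        demo_gprs[reg & 15] = val;
}

/* Sketch of an ops table; the remaining hooks are omitted here. */
static const struct x86_emulate_ops demo_ops = {
        .read_gpr  = demo_read_gpr,
        .write_gpr = demo_write_gpr,
        /* .read_std, .write_emulated, .get_cpuid, ... left out of this sketch */
};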
typedef u32 __attribute__((vector_size(16))) sse128_t;

/* Type, address-of, and value of an instruction's operand. */
struct operand {
        enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
        unsigned int bytes;
        unsigned int count;
        union {
                unsigned long orig_val;
                u64 orig_val64;
        };
        union {
                unsigned long *reg;
                struct segmented_address {
                        ulong ea;
                        unsigned seg;
                } mem;
                unsigned xmm;
                unsigned mm;
        } addr;
        union {
                unsigned long val;
                u64 val64;
                char valptr[sizeof(unsigned long) + 2];
                sse128_t vec_val;
                u64 mm_val;
                void *data;
        };
};
struct fetch_cache {
        u8 data[15];
        unsigned long start;
        unsigned long end;
};

struct read_cache {
        u8 data[1024];
        unsigned long pos;
        unsigned long end;
};

/* Execution mode, passed to the emulator. */
enum x86emul_mode {
        X86EMUL_MODE_REAL,      /* Real mode. */
        X86EMUL_MODE_VM86,      /* Virtual 8086 mode. */
        X86EMUL_MODE_PROT16,    /* 16-bit protected mode. */
        X86EMUL_MODE_PROT32,    /* 32-bit protected mode. */
        X86EMUL_MODE_PROT64,    /* 64-bit (long) mode. */
};
struct x86_emulate_ctxt {
        const struct x86_emulate_ops *ops;

        /* Register state before/after emulation. */
        unsigned long eflags;
        unsigned long eip;      /* eip before instruction emulation */
        /* Emulated execution mode, represented by an X86EMUL_MODE value. */
        enum x86emul_mode mode;

        /* interruptibility state, as a result of execution of STI or MOV SS */
        int interruptibility;

        bool guest_mode;        /* guest running a nested guest */
        bool perm_ok;           /* do not check permissions if true */
        bool only_vendor_specific_insn;

        bool have_exception;
        struct x86_exception exception;

        /* decode cache */
        u8 twobyte;
        u8 b;
        u8 intercept;
        u8 lock_prefix;
        u8 rep_prefix;
        u8 op_bytes;
        u8 ad_bytes;
        u8 rex_prefix;
        struct operand src;
        struct operand src2;
        struct operand dst;
        bool has_seg_override;
        u8 seg_override;
        u64 d;
        int (*execute)(struct x86_emulate_ctxt *ctxt);
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
        /* modrm */
        u8 modrm;
        u8 modrm_mod;
        u8 modrm_reg;
        u8 modrm_rm;
        u8 modrm_seg;
        bool rip_relative;
        unsigned long _eip;
        struct operand memop;
        u32 regs_valid;         /* bitmaps of registers in _regs[] that can be read */
        u32 regs_dirty;         /* bitmaps of registers in _regs[] that have been written */
        /* Fields above regs are cleared together. */
        unsigned long _regs[NR_VCPU_REGS];
        struct operand *memopp;
        struct fetch_cache fetch;
        struct read_cache io_read;
        struct read_cache mem_read;
};
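Before each emulation pass the host refreshes the fields the decoder consumes. The fragment below is a rough, hypothetical sketch of that setup (KVM's own initialization lives in arch/x86/kvm/x86.c and does considerably more); only the field and enumerator names come from this header, and the demo_* helpers are assumed.

/* Illustrative per-instruction context setup; demo_* values are assumed host state. */
static void demo_init_ctxt(struct x86_emulate_ctxt *ctxt)
{
        ctxt->ops            = &demo_ops;            /* table from the earlier sketch */
        ctxt->eflags         = demo_guest_rflags();  /* assumed helper */
        ctxt->eip            = demo_guest_rip();     /* assumed helper */
        ctxt->mode           = X86EMUL_MODE_PROT64;  /* whatever mode the guest is in */
        ctxt->guest_mode     = false;                /* not running a nested guest */
        ctxt->perm_ok        = false;
        ctxt->have_exception = false;
}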
/* Repeat String Operation Prefix */
#define REPE_PREFIX     0xf3
#define REPNE_PREFIX    0xf2

/* CPUID vendors */
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
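These constants are the CPUID leaf 0 vendor string packed little-endian into EBX, EDX and ECX: 'G','e','n','u' = 0x47,0x65,0x6e,0x75 gives 0x756e6547 in EBX, and so on. A short sketch of a vendor check built on the get_cpuid hook; the function name is invented and the ops table is assumed to be populated.

/* Illustration only: does the emulated CPU report GenuineIntel? */
static bool demo_is_genuine_intel(struct x86_emulate_ctxt *ctxt)
{
        u32 eax = 0x00000000, ebx = 0, ecx = 0, edx = 0;   /* leaf 0: vendor string */

        ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
        return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
               ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
               edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}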
enum x86_intercept_stage {
        X86_ICTP_NONE = 0,      /* Allow zero-init to not match anything */
        X86_ICPT_PRE_EXCEPT,
        X86_ICPT_POST_EXCEPT,
        X86_ICPT_POST_MEMACCESS,
};
enum x86_intercept {
        x86_intercept_none,
        x86_intercept_cr_read,
        x86_intercept_cr_write,
        x86_intercept_clts,
        x86_intercept_lmsw,
        x86_intercept_smsw,
        x86_intercept_dr_read,
        x86_intercept_dr_write,
        x86_intercept_lidt,
        x86_intercept_sidt,
        x86_intercept_lgdt,
        x86_intercept_sgdt,
        x86_intercept_lldt,
        x86_intercept_sldt,
        x86_intercept_ltr,
        x86_intercept_str,
        x86_intercept_rdtsc,
        x86_intercept_rdpmc,
        x86_intercept_pushf,
        x86_intercept_popf,
        x86_intercept_cpuid,
        x86_intercept_rsm,
        x86_intercept_iret,
        x86_intercept_intn,
        x86_intercept_invd,
        x86_intercept_pause,
        x86_intercept_hlt,
        x86_intercept_invlpg,
        x86_intercept_invlpga,
        x86_intercept_vmrun,
        x86_intercept_vmload,
        x86_intercept_vmsave,
        x86_intercept_vmmcall,
        x86_intercept_stgi,
        x86_intercept_clgi,
        x86_intercept_skinit,
        x86_intercept_rdtscp,
        x86_intercept_icebp,
        x86_intercept_wbinvd,
        x86_intercept_monitor,
        x86_intercept_mwait,
        x86_intercept_rdmsr,
        x86_intercept_wrmsr,
        x86_intercept_in,
        x86_intercept_ins,
        x86_intercept_out,
        x86_intercept_outs,

        nr_x86_intercepts
};
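The intercept machinery ties three of the pieces above together: the decoder fills a struct x86_instruction_info, and the ->intercept hook is consulted at a given x86_intercept_stage so a nested hypervisor can claim the instruction. A rough sketch of that call; the function, the chosen instruction, and the field values are invented for illustration.

/* Illustration only: ask the host whether a decoded WBINVD should be intercepted. */
static int demo_check_intercept(struct x86_emulate_ctxt *ctxt)
{
        struct x86_instruction_info info = {
                .intercept = x86_intercept_wbinvd,  /* which intercept */
                .rep_prefix = 0,
                .modrm_mod = 3,                     /* register form (assumed) */
                .next_rip = 0,                      /* a real decoder fills this in */
        };

        if (!ctxt->guest_mode)
                return X86EMUL_CONTINUE;    /* no nested guest, nothing to ask */

        return ctxt->ops->intercept(ctxt, &info, X86_ICPT_PRE_EXCEPT);
}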
/* Host execution mode. */
#if defined(CONFIG_X86_32)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
#elif defined(CONFIG_X86_64)
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
                         u16 tss_selector, int idt_index, int reason,
                         bool has_error_code, u32 error_code);
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);

#endif /* _ASM_X86_KVM_X86_EMULATE_H */
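The entry points above are normally used as a decode-then-execute pair: x86_decode_insn() parses the instruction bytes into the context's decode cache, and x86_emulate_insn() runs the decoded instruction, reporting one of the EMULATION_* results. A minimal, hypothetical driver built on an already-initialized context (the buffer, its length, and the restart handling are assumptions, not KVM's actual control flow):

/* Illustration only: run one guest instruction through the emulator. */
static int demo_emulate_one(struct x86_emulate_ctxt *ctxt,
                            void *insn_bytes, int insn_len)
{
        int rc;

        if (x86_decode_insn(ctxt, insn_bytes, insn_len) != EMULATION_OK)
                return EMULATION_FAILED;        /* could not decode the bytes */

        rc = x86_emulate_insn(ctxt);
        while (rc == EMULATION_RESTART)
                rc = x86_emulate_insn(ctxt);    /* re-run with the updated context */

        return rc;      /* EMULATION_OK, EMULATION_FAILED or EMULATION_INTERCEPTED */
}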