#include <linux/module.h>

#define OpImplicit          1ull
#define OpImmUByte          7ull
#define OpImmByte          10ull
#define OpImmFAddr         17ull
#define OpMemFAddr         18ull
#define OpImmU16           19ull

#define OpMask             ((1ull << OpBits) - 1)
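/*
 * Operand types: each Op* value names one way an operand can be decoded.
 * The decode flags pack an OpBits-wide type into three fields of a 64-bit
 * word (see the DstShift, SrcShift and Src2Shift groups below), so one
 * u64 describes destination, source and second-source decoding.
 */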
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstMask     (OpMask << DstShift)
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)
#define String      (1<<13)
#define Stack       (1<<14)
#define GroupMask   (7<<15)
#define Group       (1<<15)
#define GroupDual   (2<<15)
#define Prefix      (3<<15)
#define RMExt       (4<<15)
#define ModRM       (1<<19)
#define VendorSpecific (1<<22)
#define NoAccess    (1<<23)
#define Op3264      (1<<24)
#define Undefined   (1<<25)
#define PageTable   (1 << 29)
#define Src2Shift   (30)
#define Src2None    (OpNone << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)
#define Aligned     ((u64)1 << 41)
#define Unaligned   ((u64)1 << 42)
#define Avx         ((u64)1 << 43)
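/*
 * X2..X16 expand to that many copies of their argument, comma-separated;
 * they are used to fill runs of identical entries in the opcode tables.
 */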
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2

		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);

	ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);

	ctxt->regs_valid = 0;

#if defined(CONFIG_X86_64)
#elif defined(__i386__)

#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
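/*
 * EFLAGS_MASK covers the arithmetic result flags. These are the only bits
 * the inline-asm helpers below copy from the guest's saved EFLAGS before
 * executing an instruction and write back afterwards.
 */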
/* Before executing instruction: restore necessary bits in EFLAGS. */
#define _PRE_EFLAGS(_sav, _msk, _tmp) \
	"movl %"_sav",%"_LO32 _tmp"; " \
	"push %"_tmp"; " \
	"push %"_tmp"; " \
	"movl %"_msk",%"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"pushf; " \
	"notl %"_LO32 _tmp"; " \
	"andl %"_LO32 _tmp",("_STK"); " \
	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); " \
	"pop  %"_tmp"; " \
	"orl  %"_LO32 _tmp",("_STK"); " \
	"popf; " \
	"pop  %"_sav"; "

/* After executing instruction: write-back necessary bits in EFLAGS. */
#define _POST_EFLAGS(_sav, _msk, _tmp) \
	"pushf; " \
	"pop  %"_tmp"; " \
	"andl %"_msk",%"_LO32 _tmp"; " \
	"orl  %"_LO32 _tmp",%"_sav"; "
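/*
 * ____emulate_2op() runs the real instruction (e.g. "add", "xor") on the
 * host, with the guest's arithmetic flags spliced in via _PRE_EFLAGS and
 * the results harvested via _POST_EFLAGS, so the guest sees exactly the
 * hardware's flag behaviour. Roughly:
 *
 *	emulate_2op_SrcV(ctxt, "add");	// dst.val += src.val, updating
 *					// ctxt->eflags as ADD would
 */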
#define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
	__asm__ __volatile__ ( \
		_PRE_EFLAGS("0", "4", "2") \
		_op _suffix " %"_x"3,%1; " \
		_POST_EFLAGS("0", "4", "2") \
		: "=m" ((ctxt)->eflags), \
		  "+q" (*(_dsttype*)&(ctxt)->dst.val), \
		: _y ((ctxt)->src.val), "i" (EFLAGS_MASK)); \

#define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
	unsigned long _tmp; \
	switch ((ctxt)->dst.bytes) { \
		____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
		____emulate_2op(ctxt,_op,_lx,_ly,"l",u32); \
		ON64(____emulate_2op(ctxt,_op,_qx,_qy,"q",u64)); \

#define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
	unsigned long _tmp; \
	switch ((ctxt)->dst.bytes) { \
		____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
		__emulate_2op_nobyte(ctxt, _op, \
				     _wx, _wy, _lx, _ly, _qx, _qy); \

#define emulate_2op_SrcB(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "c", "b", "c", "b", "c", "b", "c")

#define emulate_2op_SrcV(ctxt, _op) \
	__emulate_2op(ctxt, _op, "b", "q", "w", "r", _LO32, "r", "", "r")

#define emulate_2op_SrcV_nobyte(ctxt, _op) \
	__emulate_2op_nobyte(ctxt, _op, "w", "r", _LO32, "r", "", "r")
#define __emulate_2op_cl(ctxt, _op, _suffix, _type) \
	unsigned long _tmp; \
	_type _clv = (ctxt)->src2.val; \
	_type _srcv = (ctxt)->src.val; \
	_type _dstv = (ctxt)->dst.val; \
	__asm__ __volatile__ ( \
		_PRE_EFLAGS("0", "5", "2") \
		_op _suffix " %4,%1 \n" \
		_POST_EFLAGS("0", "5", "2") \
		: "=m" ((ctxt)->eflags), "+r" (_dstv), "=&r" (_tmp) \
		: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
	(ctxt)->src2.val = (unsigned long) _clv; \
	(ctxt)->src.val = (unsigned long) _srcv; \
	(ctxt)->dst.val = (unsigned long) _dstv; \
#define emulate_2op_cl(ctxt, _op) \
	switch ((ctxt)->dst.bytes) { \
		__emulate_2op_cl(ctxt, _op, "w", u16); \
		__emulate_2op_cl(ctxt, _op, "l", u32); \
		ON64(__emulate_2op_cl(ctxt, _op, "q", ulong)); \

#define __emulate_1op(ctxt, _op, _suffix) \
	unsigned long _tmp; \
	__asm__ __volatile__ ( \
		_PRE_EFLAGS("0", "3", "2") \
		_op _suffix " %1; " \
		_POST_EFLAGS("0", "3", "2") \
		: "=m" ((ctxt)->eflags), "+m" ((ctxt)->dst.val), \
		: "i" (EFLAGS_MASK)); \

#define emulate_1op(ctxt, _op) \
	switch ((ctxt)->dst.bytes) { \
	case 1: __emulate_1op(ctxt, _op, "b"); break; \
	case 2: __emulate_1op(ctxt, _op, "w"); break; \
	case 4: __emulate_1op(ctxt, _op, "l"); break; \
	case 8: ON64(__emulate_1op(ctxt, _op, "q")); break; \

#define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
	unsigned long _tmp; \
	ulong *rax = reg_rmw((ctxt), VCPU_REGS_RAX); \
	ulong *rdx = reg_rmw((ctxt), VCPU_REGS_RDX); \
	__asm__ __volatile__ ( \
		_PRE_EFLAGS("0", "5", "1") \
		_op _suffix " %6; " \
		_POST_EFLAGS("0", "5", "1") \
		".pushsection .fixup,\"ax\" \n\t" \
		"3: movb $1, %4 \n\t" \
		_ASM_EXTABLE(1b, 3b) \
		: "=m" ((ctxt)->eflags), "=&r" (_tmp), \
		  "+a" (*rax), "+d" (*rdx), "+qm"(_ex) \
		: "i" (EFLAGS_MASK), "m" ((ctxt)->src.val)); \

#define emulate_1op_rax_rdx(ctxt, _op, _ex) \
	switch((ctxt)->src.bytes) { \
		__emulate_1op_rax_rdx(ctxt, _op, "b", _ex); \
		__emulate_1op_rax_rdx(ctxt, _op, "w", _ex); \
		__emulate_1op_rax_rdx(ctxt, _op, "l", _ex); \
		__emulate_1op_rax_rdx(ctxt, _op, "q", _ex)); \
		.src_val    = ctxt->src.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.next_rip   = ctxt->eip,

	return ctxt->ops->intercept(ctxt, &info, stage);

	*dest = (*dest & ~mask) | (src & mask);
	return ~0U >> ((ss.d ^ 1) * 16);
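/*
 * For a 16-bit stack segment (ss.d == 0) the mask above is 0xffff; for a
 * 32-bit one (ss.d == 1) it is 0xffffffff. stack_size() converts that
 * mask into an operand size in bytes.
 */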
	return (__fls(stack_mask(ctxt)) + 1) >> 3;

static inline unsigned long
	if (ctxt->ad_bytes == sizeof(unsigned long))
	return reg & ad_mask(ctxt);

static inline unsigned long
	return address_mask(ctxt, reg);

static void masked_increment(ulong *reg, ulong mask, int inc)
	assign_masked(reg, *reg + inc, mask);

	if (ctxt->ad_bytes == sizeof(unsigned long))
		mask = ad_mask(ctxt);
	masked_increment(reg, mask, inc);

	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);

	register_address_increment(ctxt, &ctxt->_eip, rel);

	return desc->g ? (limit << 12) | 0xfff : limit;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
	return emulate_exception(ctxt, DB_VECTOR, 0, false);

	return emulate_exception(ctxt, GP_VECTOR, err, true);

	return emulate_exception(ctxt, SS_VECTOR, err, true);

	return emulate_exception(ctxt, UD_VECTOR, 0, false);

	return emulate_exception(ctxt, TS_VECTOR, err, true);

	return emulate_exception(ctxt, DE_VECTOR, 0, false);

	return emulate_exception(ctxt, NM_VECTOR, 0, false);
	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);

	else if (ctxt->d & Avx)

		       struct segmented_address addr,
		       unsigned size, bool write, bool fetch,

	la = seg_base(ctxt, addr.seg) + addr.ea;
	switch (ctxt->mode) {
		if (((signed long)la << 16) >> 16 != la)
			return emulate_gp(ctxt, 0);
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
		if (((desc.type & 8) || !(desc.type & 2)) && write)
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
		lim = desc_limit_scaled(&desc);
		if ((desc.type & 8) || !(desc.type & 4)) {
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
			lim = desc.d ? 0xffffffff : 0xffff;
			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
		cpl = ctxt->ops->cpl(ctxt);
		if (!(desc.type & 8)) {
		} else if ((desc.type & 8) && !(desc.type & 4)) {
		} else if ((desc.type & 8) && (desc.type & 4)) {
	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
		return emulate_gp(ctxt, 0);
		return emulate_ss(ctxt, sel);
		return emulate_gp(ctxt, sel);

		     struct segmented_address addr,
		     unsigned size, bool write,
	return __linearize(ctxt, addr, size, write, false, linear);

			      struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);

		size = min(15UL - cur_size,
		rc = __linearize(ctxt, addr, size, false, true, &linear);
		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,

			 void *dest, unsigned size)
		rc = do_insn_fetch_byte(ctxt, dest++);
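/*
 * insn_fetch() pulls a typed immediate out of the instruction stream and
 * evaluates to its value; insn_fetch_arr() fetches _size raw bytes. Both
 * leave the error code in the caller's rc on failure.
 */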
#define insn_fetch(_type, _ctxt) \
({	unsigned long _x; \
	rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \

#define insn_fetch_arr(_arr, _size, _ctxt) \
({	rc = do_insn_fetch(_ctxt, _arr, (_size)); \
	if (rc != X86EMUL_CONTINUE) \
	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
		p = reg_rmw(ctxt, modrm_reg);

			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
	rc = segmented_read_std(ctxt, addr, size, 2);
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
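/*
 * test_cc() evaluates an x86 condition code against EFLAGS. Condition
 * codes come in pairs: bits 3:1 select the base test (O, C, Z, C|Z, S,
 * P, S^O, (S^O)|Z) and bit 0 inverts the result, which is why the final
 * return XORs with (condition & 1).
 */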
static int test_cc(unsigned int condition, unsigned int flags)
	switch ((condition & 15) >> 1) {
	return (!!rc ^ (condition & 1));
static void fetch_register_operand(struct operand *op)
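/*
 * The xmm/mm register file cannot be indexed by a runtime value from C,
 * so read_sse_reg()/write_sse_reg() (and the mmx variants below) switch
 * on the register number and emit one movdqa/movq per case.
 */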
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
	ctxt->ops->get_fpu(ctxt);
	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
	ctxt->ops->put_fpu(ctxt);
	reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
	if (ctxt->d & Sse) {
		read_sse_reg(ctxt, &op->vec_val, reg);
	if (ctxt->d & Mmx) {
		op->addr.reg = decode_register(ctxt, reg, highbyte_regs);
		op->addr.reg = decode_register(ctxt, reg, 0);
	fetch_register_operand(op);

	int index_reg = 0, base_reg = 0, scale;

	if (ctxt->d & Sse) {
	if (ctxt->d & Mmx) {
		fetch_register_operand(op);
			modrm_ea += bx + si;
			modrm_ea += bx + di;
			modrm_ea += bp + si;
			modrm_ea += bp + di;
		modrm_ea = (u16)modrm_ea;
		index_reg |= (sib >> 3) & 7;
		base_reg |= sib & 7;
			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
			modrm_ea += reg_read(ctxt, index_reg) << scale;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~(ctxt->dst.bytes * 8 - 1);
		if (ctxt->src.bytes == 2)
		else if (ctxt->src.bytes == 4)
		ctxt->dst.addr.mem.ea += (sv >> 3);
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;

			 unsigned long addr, void *dest, unsigned size)
		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,

			  struct segmented_address addr,
	rc = linearize(ctxt, addr, size, false, &linear);
	return read_emulated(ctxt, linear, data, size);

			   struct segmented_address addr,
	rc = linearize(ctxt, addr, size, true, &linear);
	return ctxt->ops->write_emulated(ctxt, linear, data, size,

			     struct segmented_address addr,
			     const void *orig_data, const void *data,
	rc = linearize(ctxt, addr, size, true, &linear);
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
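/*
 * pio_in_emulated() buffers string port input: when the read cache is
 * empty it fetches a batch of elements with one ops->pio_in_emulated
 * call (bounded by the page, the rc->data buffer and the rep count) and
 * then hands them out element by element.
 */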
			   unsigned int size, unsigned short port,
	if (rc->pos == rc->end) {
		unsigned int in_page, n;
		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
	ctxt->dst.type = OP_MEM_STR;
	ctxt->dst.count = (rc->end - rc->pos) / size;

	ctxt->ops->get_idt(ctxt, &dt);
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);
	addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,

	if (selector & 1 << 2) {
			memset (dt, 0, sizeof *dt);
		dt->size = desc_limit_scaled(&desc);
		dt->address = get_desc_base(&desc);

	u16 index = selector >> 3;
	get_descriptor_table_ptr(ctxt, selector, &dt);
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	*desc_addr_p = addr = dt.address + index * 8;
	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,

	u16 index = selector >> 3;
	get_descriptor_table_ptr(ctxt, selector, &dt);
	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);
	addr = dt.address + index * 8;
	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,

				   u16 selector, int seg)
	bool null_selector = !(selector & ~0x3);
	memset(&seg_desc, 0, sizeof seg_desc);
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
	cpl = ctxt->ops->cpl(ctxt);
	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	err_code = selector & 0xfffc;
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
		if (!(seg_desc.type & 8))
		if (seg_desc.type & 4) {
			if (rpl > cpl || dpl != cpl)
		selector = (selector & 0xfffc) | cpl;
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			old_desc = seg_desc;
			ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
		if (seg_desc.s || seg_desc.type != 2)
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
	emulate_exception(ctxt, err_vec, err_code, true);

static void write_register_operand(struct operand *op)
	switch (op->bytes) {
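/*
 * writeback() dispatches on the destination operand type: registers go
 * through write_register_operand(), memory destinations through
 * segmented_write() (or segmented_cmpxchg() when a lock prefix demands
 * an atomic update), and xmm/mm destinations through the write_sse_reg()
 * and write_mmx_reg() helpers above.
 */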
	switch (ctxt->dst.type) {
		write_register_operand(&ctxt->dst);
			rc = segmented_cmpxchg(ctxt,
					       &ctxt->dst.orig_val,
			rc = segmented_write(ctxt,
		rc = segmented_write(ctxt,
				     ctxt->dst.bytes * ctxt->dst.count);
		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
		write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);

	struct segmented_address addr;
	rsp_increment(ctxt, -bytes);
	return segmented_write(ctxt, addr, data, bytes);

		       void *dest, int len)
	struct segmented_address addr;
	rc = segmented_read(ctxt, addr, dest, len);
	rsp_increment(ctxt, len);

	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);

			void *dest, int len)
	unsigned long val, change_mask;
	int cpl = ctxt->ops->cpl(ctxt);
	rc = emulate_pop(ctxt, &val, len);
	switch(ctxt->mode) {
			return emulate_gp(ctxt, 0);
	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	ctxt->dst.type = OP_REG;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);

	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	rc = push(ctxt, &rbp, stack_size(ctxt));

	int seg = ctxt->src2.val;
	ctxt->src.val = get_segment_selector(ctxt, seg);
	return em_push(ctxt);

	int seg = ctxt->src2.val;
	unsigned long selector;
	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
	rc = load_segment_descriptor(ctxt, (u16)selector, seg);

		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
	return em_push(ctxt);

	rsp_increment(ctxt, ctxt->op_bytes);
	rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
		writeback_registers(ctxt);

	switch(ctxt->mode) {
		return __emulate_int_real(ctxt, irq);

	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
	ctxt->_eip = temp_eip;
	ctxt->eflags |= temp_eflags;

	switch(ctxt->mode) {
		return emulate_iret_real(ctxt);

	ctxt->dst.val = ~ctxt->dst.val;

		return emulate_de(ctxt);
		return emulate_de(ctxt);

	old_eip = ctxt->_eip;
	ctxt->src.val = old_eip;
	rc = em_jmp_far(ctxt);

	u64 old = ctxt->dst.orig_val64;

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	return em_pop(ctxt);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	ctxt->src.orig_val = ctxt->src.val;
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->dst.type = OP_REG;

	int seg = ctxt->src2.val;
	rc = load_segment_descriptor(ctxt, sel, seg);
	ctxt->dst.val = ctxt->src.val;

	set_desc_base(cs, 0);
	set_desc_limit(cs, 0xfffff);
	set_desc_base(ss, 0);
	set_desc_limit(ss, 0xfffff);

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);

		return emulate_ud(ctxt);
	if (!(em_syscall_is_enabled(ctxt)))
		return emulate_ud(ctxt);
	setup_syscalls_segments(ctxt, &cs, &ss);
		return emulate_ud(ctxt);
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
	ctxt->_eip = msr_data;

		return emulate_gp(ctxt, 0);
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);
		return emulate_ud(ctxt);
	setup_syscalls_segments(ctxt, &cs, &ss);
	switch (ctxt->mode) {
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
	cs_sel = (u16)msr_data;
	ss_sel = cs_sel + 8;
	ctxt->_eip = msr_data;

	u16 cs_sel = 0, ss_sel = 0;
		return emulate_gp(ctxt, 0);
	setup_syscalls_segments(ctxt, &cs, &ss);
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;

	return ctxt->ops->cpl(ctxt) > iopl;
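/*
 * emulator_io_port_access_allowed() consults the I/O permission bitmap in
 * the TSS: the u16 at offset 102 gives the bitmap's offset within the
 * TSS, each port maps to one bit, and any set bit in the checked range
 * denies access. It is only reached when IOPL does not already permit
 * the access (see emulator_bad_iopl()).
 */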
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	if (desc_limit_scaled(&tr_seg) < 103)
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
	if ((perm >> bit_idx) & mask)

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
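/*
 * Hardware-style task switch: save the current state into the outgoing
 * TSS, load the incoming one, and, for nested task switches (old_tss_sel
 * != 0xffff), link the old TSS selector into the new TSS's
 * prev_task_link field. task_switch_16() and task_switch_32() differ
 * only in the TSS layout they read and write.
 */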
			  u16 tss_selector, u16 old_tss_sel,
	u32 new_tss_base = get_desc_base(new_desc);
	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	save_state_to_tss16(ctxt, &tss_seg);
	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
	return load_state_from_tss16(ctxt, &tss_seg);

	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);

	ctxt->ops->set_rflags(ctxt, ctxt->eflags);

			  u16 tss_selector, u16 old_tss_sel,
	u32 new_tss_base = get_desc_base(new_desc);
	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	save_state_to_tss32(ctxt, &tss_seg);
	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;
		ret = ops->write_std(ctxt, new_tss_base,
				     &tss_seg.prev_task_link,
				     sizeof tss_seg.prev_task_link,
	return load_state_from_tss32(ctxt, &tss_seg);

				   u16 tss_selector, int idt_index, int reason,
	ulong old_tss_base =
	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
		if (idt_index != -1) {
			ret = read_interrupt_descriptor(ctxt, idt_index,
			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
			int dpl = next_tss_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, tss_selector);
	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		emulate_ts(ctxt, tss_selector & 0xfffc);
		curr_tss_desc.type &= ~(1 << 1);
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
		old_tss_sel = 0xffff;
	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	next_tss_desc.type |= (1 << 1);
	write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	if (has_error_code) {
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);

			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
	invalidate_registers(ctxt);
	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);
		writeback_registers(ctxt);

	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);

	bool af, cf, old_cf;
	if ((al & 0x0f) > 9 || af) {
		cf = old_cf | (al >= 250);
	if (old_al > 0x99 || old_cf) {

	ctxt->src.type = OP_IMM;
	ctxt->src.bytes = 1;

	long rel = ctxt->src.val;
	return em_push(ctxt);

	old_eip = ctxt->_eip;
	ctxt->src.val = old_cs;
	ctxt->src.val = old_eip;
	return em_push(ctxt);

	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->_eip;
	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
	rsp_increment(ctxt, ctxt->src.val);

	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);
	ctxt->dst.val = ctxt->src.orig_val;

	ctxt->dst.val = ctxt->src2.val;
	return em_imul(ctxt);

	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

		return emulate_gp(ctxt, 0);

		return emulate_gp(ctxt, 0);

		val = ctxt->src.val & ~0ULL;
		val = ctxt->src.val & ~0U;

		return emulate_gp(ctxt, 0);

		return emulate_gp(ctxt, 0);

		return emulate_gp(ctxt, 0);

		return emulate_ud(ctxt);
	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);

		return emulate_ud(ctxt);
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);

	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	ctxt->ops->invlpg(ctxt, linear);

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	ctxt->ops->set_cr(ctxt, 0, cr0);

	rc = ctxt->ops->fix_hypercall(ctxt);

	return segmented_write(ctxt, ctxt->dst.addr.mem,

	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);

	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
	rc = ctxt->ops->fix_hypercall(ctxt);
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,

	ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);

	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));

	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		jmp_rel(ctxt, ctxt->src.val);

		jmp_rel(ctxt, ctxt->src.val);

	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,

	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,

	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);

#ifdef CONFIG_X86_64
		asm("bswap %0" : "+r"(ctxt->dst.val));
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));

static bool valid_cr(int nr)

		return emulate_ud(ctxt);

	u64 new_val = ctxt->src.val64;
	static u64 cr_reserved_bits[] = {
		0xffffffff00000000ULL,
		return emulate_ud(ctxt);
	if (new_val & cr_reserved_bits[cr])
		return emulate_gp(ctxt, 0);
			return emulate_gp(ctxt, 0);
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
			return emulate_gp(ctxt, 0);
		if (efer & EFER_LMA)
			return emulate_gp(ctxt, 0);
		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
			return emulate_gp(ctxt, 0);

	ctxt->ops->get_dr(ctxt, 7, &dr7);
	return dr7 & (1 << 13);

		return emulate_ud(ctxt);
	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);
	if (check_dr7_gd(ctxt))
		return emulate_db(ctxt);

	u64 new_val = ctxt->src.val64;
	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);
	return check_dr_read(ctxt);

		return emulate_ud(ctxt);
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);
	return check_svme(ctxt);

	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
		return emulate_ud(ctxt);

	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
		return emulate_gp(ctxt, 0);

	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);
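/*
 * Shorthand used to build the opcode tables below: D() declares a
 * decode-only entry, I() one with an ->execute callback, G()/GD()
 * indirect through a group (ModRM reg field) or a mod-dependent group
 * pair, GP() through a mandatory-prefix table, and the *2bv/I6ALU
 * wrappers stamp out the byte/word variants an ALU opcode family needs.
 */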
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
		      .check_perm = (_p) }
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
	  .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm1[] = {

static const struct opcode group7_rm3[] = {

static const struct opcode group7_rm7[] = {

static const struct opcode group1[] = {

static const struct opcode group1A[] = {

static const struct opcode group3[] = {

static const struct opcode group4[] = {

static const struct opcode group5[] = {

static const struct opcode group6[] = {

	N, EXT(0, group7_rm3),

static const struct opcode group8[] = {

static const struct opcode group11[] = {

static const struct gprefix pfx_0f_6f_0f_7f = {

static const struct gprefix pfx_vmovntpx = {
	I(0, em_mov), N, N, N,

static const struct opcode opcode_table[256] = {

static const struct opcode twobyte_table[256] = {
	G(0, group6), GD(0, &group7), N, N,
	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, GD(0, &group9),
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
		      unsigned size, bool sign_extension)
	switch (op->bytes) {
	if (!sign_extension) {
		switch (op->bytes) {
			op->val &= 0xffffffff;

		decode_register_operand(ctxt, op);
		rc = decode_imm(ctxt, op, 1, false);
		fetch_bit_operand(ctxt);
		ctxt->memop.bytes = 8;
		fetch_register_operand(op);
		fetch_register_operand(op);
		rc = decode_imm(ctxt, op, 1, true);
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		ctxt->memop.bytes = 1;
		ctxt->memop.bytes = 2;
		ctxt->memop.bytes = 4;
		rc = decode_imm(ctxt, op, 2, false);
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		op->addr.mem.seg = seg_override(ctxt);
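/*
 * x86_decode_insn() runs in stages: legacy/REX prefixes first (setting
 * op_bytes, ad_bytes and segment overrides), then the opcode byte(s) and
 * the table lookup, then group/prefix indirection on the ModRM byte, and
 * finally decode_operand() for the src, src2 and dst operands.
 */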
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	ctxt->fetch.end = ctxt->fetch.start + insn_len;
		def_op_bytes = def_ad_bytes = 2;
		def_op_bytes = def_ad_bytes = 4;
#ifdef CONFIG_X86_64
			ctxt->ad_bytes = def_ad_bytes ^ 12;
			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
			set_seg_override(ctxt, ctxt->b & 7);

	opcode = opcode_table[ctxt->b];
	if (ctxt->b == 0x0f) {
		opcode = twobyte_table[ctxt->b];

			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
				opcode = opcode.u.gdual->mod012[goffset];
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;

	else if (ctxt->d & Mmx)

		rc = decode_modrm(ctxt, &ctxt->memop);
			set_seg_override(ctxt, ctxt->modrm_seg);
		rc = decode_abs(ctxt, &ctxt->memop);

	ctxt->memop.addr.mem.seg = seg_override(ctxt);

	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))

	ctxt->ops->get_fpu(ctxt);
	asm volatile("1: fwait \n\t"
		     ".pushsection .fixup,\"ax\" \n\t"
		     "movb $1, %[fault] \n\t"
		     : [fault] "+qm"(fault));
	ctxt->ops->put_fpu(ctxt);
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	if (op->type == OP_MM)
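/*
 * x86_emulate_insn() is the execute stage: check privilege and
 * intercepts, read in the memory operands, run either the opcode's
 * ->execute callback or the big switch below, then write back results
 * and advance any string-instruction iteration state.
 */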
	int saved_dst_type = ctxt->dst.type;

		rc = emulate_ud(ctxt);
		rc = emulate_ud(ctxt);
		rc = emulate_ud(ctxt);
		rc = emulate_ud(ctxt);
		rc = emulate_nm(ctxt);

	if (ctxt->d & Mmx) {
		rc = flush_pending_x87_faults(ctxt);
		fetch_possible_mmx_operand(ctxt, &ctxt->src);
		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
		if (!(ctxt->d & Mov))
			fetch_possible_mmx_operand(ctxt, &ctxt->dst);

		rc = emulator_check_intercept(ctxt, ctxt->intercept,

	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
		rc = emulate_gp(ctxt, 0);
		rc = emulate_ud(ctxt);

		rc = emulator_check_intercept(ctxt, ctxt->intercept,

	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		ctxt->src.orig_val64 = ctxt->src.val64;

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
	ctxt->dst.orig_val = ctxt->dst.val;

		rc = emulator_check_intercept(ctxt, ctxt->intercept,

		goto cannot_emulate;

		jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		rc = emulate_int(ctxt, 3);
		rc = emulate_int(ctxt, ctxt->src.val);
			rc = emulate_int(ctxt, 4);
			jmp_rel(ctxt, ctxt->src.val);
		ctxt->ops->halt(ctxt);
		goto cannot_emulate;

	ctxt->dst.type = saved_dst_type;

			count = ctxt->src.count;
			count = ctxt->dst.count;
		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),

		if (!string_insn_completed(ctxt)) {

	writeback_registers(ctxt);

	writeback_registers(ctxt);

		(ctxt->ops->wbinvd)(ctxt);

		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
		if (!test_cc(ctxt->b, ctxt->eflags))
		if (test_cc(ctxt->b, ctxt->eflags))
			jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
		ctxt->src.val = ctxt->dst.orig_val;
		write_register_operand(&ctxt->src);
		goto cannot_emulate;

	invalidate_registers(ctxt);

	writeback_registers(ctxt);