#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
#define OpImplicit         1ull  /* No generic decode */
#define OpImmUByte         7ull  /* Zero-extended 8-bit immediate */
#define OpImmByte         10ull  /* 8-bit sign extended immediate */
#define OpImmFAddr        17ull  /* Immediate far address */
#define OpMemFAddr        18ull  /* Far address in memory */
#define OpImmU16          19ull  /* Immediate operand, 16 bits, zero extended */

#define OpMask             ((1ull << OpBits) - 1)
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)     /* Memory operand is absolute displacement */
#define String      (1<<13)     /* String instruction (rep capable) */
#define Stack       (1<<14)     /* Stack instruction (push/pop) */
#define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
#define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
#define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
#define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape      (5<<15)     /* Escape to coprocessor instruction */
#define InstrDual   (6<<15)     /* Alternate instruction decoding of mod == 3 */
#define ModeDual    (7<<15)     /* Different instruction for 32/64 bit */
/* Generic ModRM decode. */
#define ModRM       (1<<19)
/* Misc flags */
#define EmulateOnUD (1<<22)     /* Emulate if unsupported by the host */
#define NoAccess    (1<<23)     /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264      (1<<24)     /* Operand is 64b in long mode, 32b otherwise */
#define Undefined   (1<<25)     /* No Such Instruction */
#define PageTable   (1 << 29)   /* instruction used to write page table */
#define NotImpl     (1 << 30)   /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned   ((u64)2 << 41)  /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx         ((u64)3 << 41)  /* Advanced Vector Extensions */
#define Aligned16   ((u64)4 << 41)  /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop      ((u64)1 << 44)  /* Use opcode::u.fastop */
#define NoWrite     ((u64)1 << 45)  /* No writeback */
#define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
#define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
#define NearBranch  ((u64)1 << 52)  /* Near branches */
#define No16        ((u64)1 << 53)  /* No 16 bit operand */
#define IncSP       ((u64)1 << 54)  /* SP is incremented before ModRM calc */
#define TwoMemOp    ((u64)1 << 55)  /* Instruction has two memory operands */
#define IsBranch    ((u64)1 << 56)  /* Instruction is considered a branch. */
#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
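/*
 * The X-macros above simply replicate their argument list: X2(x) expands to
 * "x, x", X8(x) to eight copies, and so on.  They exist so that runs of
 * identical opcode-table entries can be written compactly; for example, a
 * row of eight undefined slots can be written X8(N) instead of spelling out
 * "N, N, N, N, N, N, N, N".
 */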
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)
#define FASTOP_SIZE	16

#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"11: " ASM_RET \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define __FOP_START(op, align) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(align) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
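/*
 * Illustration only (not part of the source): FOP1E(notb, al), as used by
 * FASTOP1(not), string-pastes roughly the following assembly:
 *
 *	.align 16
 *	.type notb_al, @function
 *	notb_al:
 *	10:	notb %al
 *	11:	ret
 *	.size notb_al, .-notb_al
 *
 * FOP1EEX() additionally attaches an exception-table entry so that a fault
 * at label 10 resumes at the ret at label 11 with %esi (the fastop pointer
 * register) zeroed, which is how fastop() callers detect the fault.
 */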
#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END
#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)
#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END
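/*
 * Note (illustration): FOP_START/FOP_END bracket a table of stubs, one per
 * operand size, each aligned to FASTOP_SIZE (16) bytes.  fastop() below can
 * therefore locate the right stub purely by arithmetic, e.g. for a 4-byte
 * destination it calls em_xxx + __ffs(4) * FASTOP_SIZE.  FASTOP2W() and
 * FASTOP3WCL() plug a FOPNOP() into the byte-sized slot so that indexing
 * stays uniform for word-only operations.
 */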
#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
	FOP_FUNC(op) \
	#op " %al \n\t" \
	FOP_RET(op)
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
		     : [_fault] "+r"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
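/*
 * Usage sketch (illustration only), as in em_fxsave() further down:
 *
 *	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 *
 * If the instruction faults, the exception-table entry resumes execution at
 * label 2 with _fault set to a non-zero value, so the statement expression
 * evaluates to X86EMUL_UNHANDLEABLE instead of oopsing the host.
 */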
	/* emulator_check_intercept(): */
	.next_rip   = ctxt->eip,

	/* assign_masked(): */
	*dest = (*dest & ~mask) | (src & mask);

	/* assign_register(): */
	*(u8 *)reg = (u8)val;
	*(u16 *)reg = (u16)val;

	/* ad_mask(): */
	return (1UL << (ctxt->ad_bytes << 3)) - 1;

	/* stack_mask(): */
	struct desc_struct ss;

	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */

static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

	/* register_address_increment(): */
	ulong *preg = reg_rmw(ctxt, reg);

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)

	/* get_segment_selector() / set_segment_selector(): */
	struct desc_struct desc;
	struct desc_struct desc;

static u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}
	/* insn_alignment(): */
	if (likely(size < 16))
		return 1;

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       enum x86emul_mode mode, ulong *linear,
				       unsigned int flags)
{
	struct desc_struct desc;

	la = seg_base(ctxt, addr.seg) + addr.ea;

	/* 64-bit mode: the address must be canonical and must fit: */
	if (!__is_canonical_address(la, va_bits))
		goto bad;
	*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
	if (size > *max_size)
		goto bad;

	/* legacy/compat modes: truncate and check the segment limit: */
	*linear = la = (u32)la;
	if (!(desc.type & 8) && (desc.type & 4)) {
		/* expand-down segment */
		lim = desc.d ? 0xffffffff : 0xffff;
	}
	if (lim == 0xffffffff)
		*max_size = ~0u;
	else {
		*max_size = (u64)lim + 1 - addr.ea;
		if (size > *max_size)
			goto bad;
	}

bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)

	/* assign_eip(): */
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);

	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	/* emulator_recalc_and_set_mode(): */
	struct desc_struct cs;

	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
		/* Real mode. cpu must not have long mode active */

	if (ctxt->eflags & X86_EFLAGS_VM) {
		/* VM86 mode. cpu must not have long mode active */

	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
		return X86EMUL_UNHANDLEABLE;

	if (efer & EFER_LMA) {
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)

	/* segmented_read_std(): */
	rc = linearize(ctxt, addr, size, false, &linear);

	/* segmented_write_std(): */
	rc = linearize(ctxt, addr, size, true, &linear);
	/* __do_insn_fetch_bytes(): */
	unsigned size, max_size;
	unsigned long linear;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	if (unlikely(size < op_size))

	/* do_insn_fetch_bytes(): */
	if (unlikely(done_size < size))
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt)					\
({	_type _x;							\
									\
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));			\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += sizeof(_type);					\
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type));			\
	ctxt->fetch.ptr += sizeof(_type);				\
	_x;								\
})
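/*
 * Example use (as in the decoder below): "ctxt->b = insn_fetch(u8, ctxt);"
 * pulls one opcode byte from the fetch cache, advancing both ctxt->_eip and
 * the cache pointer.  Note the macro assumes a local variable "rc" and a
 * label "done" exist in the calling function.
 */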
#define insn_fetch_arr(_arr, _size, _ctxt)				\
({									\
	rc = do_insn_fetch_bytes(_ctxt, _size);				\
	if (rc != X86EMUL_CONTINUE)					\
		goto done;						\
	ctxt->_eip += (_size);						\
	memcpy(_arr, ctxt->fetch.ptr, _size);				\
	ctxt->fetch.ptr += (_size);					\
})
	/* decode_register(): */
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)

	/* em_bsf_c(): */
	return fastop(ctxt, em_bsf);

	/* em_bsr_c(): */
	return fastop(ctxt, em_bsr);
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_FIXED;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}
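/*
 * How this works (illustration): em_setcc is a FOP_START table of sixteen
 * SETcc stubs (seto, setno, setc, ...), each FASTOP_SIZE bytes apart, so
 * "condition & 0xf" indexes directly to the stub for that condition code.
 * The guest flags are loaded into EFLAGS via push/popf, the stub leaves its
 * result in %al, and the "=a" constraint returns it as rc.
 */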
	/* fetch_register_operand(): */
	switch (op->bytes) {

	/* em_fninit(): */
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fninit");

	/* em_fnstcw(): */
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstcw %0": "+m"(fcw));

	/* em_fnstsw(): */
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	asm volatile("fnstsw %0": "+m"(fsw));
	/* decode_register_operand(): */
	reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {

	if (ctxt->d & Mmx) {
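	/*
	 * For one-byte opcodes that encode the register in the low three
	 * opcode bits (e.g. push/pop/xchg rXX), REX.B supplies bit 3:
	 * "(ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3)" maps opcode 0x50
	 * with REX.B to r8, and so on.
	 */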
	/* adjust_modrm_seg(): */
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;

	/* decode_modrm(): */
	int index_reg, base_reg, scale;

	if (ctxt->d & Sse) {

	if (ctxt->d & Mmx) {

	/* 16-bit ModRM decode: */
	unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
	unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
	unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
	unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
	case 0:	modrm_ea += bx + si; break;
	case 1:	modrm_ea += bx + di; break;
	case 2:	modrm_ea += bp + si; break;
	case 3:	modrm_ea += bp + di; break;
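	/*
	 * 16-bit ModRM effective addresses for r/m values 0-7 (illustration):
	 *   0: BX+SI   1: BX+DI   2: BP+SI   3: BP+DI
	 *   4: SI      5: DI      6: BP (or disp16 when mod == 0)   7: BX
	 * which is exactly what the cases above compute before the result is
	 * truncated to 16 bits below.
	 */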
	modrm_ea = (u16)modrm_ea;

	/* 32/64-bit ModRM with SIB byte: */
	index_reg |= (sib >> 3) & 7;
	base_reg |= sib & 7;

	if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)

	modrm_ea += reg_read(ctxt, base_reg);

	if ((ctxt->d & IncSP) &&
	    base_reg == VCPU_REGS_RSP)
		modrm_ea += ctxt->op_bytes;

	if (index_reg != 4)
		modrm_ea += reg_read(ctxt, index_reg) << scale;

	modrm_ea += reg_read(ctxt, base_reg);
	/* fetch_bit_operand(): */
	mask = ~((long)ctxt->dst.bytes * 8 - 1);

	if (ctxt->src.bytes == 2)
		sv = (s16)ctxt->src.val & (s16)mask;
	else if (ctxt->src.bytes == 4)
		sv = (s32)ctxt->src.val & (s32)mask;
	else
		sv = (s64)ctxt->src.val & (s64)mask;
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)

	memcpy(dest, mc->data + mc->pos, size);

	/* segmented_read(): */
	rc = linearize(ctxt, addr, size, false, &linear);

	/* segmented_write(): */
	rc = linearize(ctxt, addr, size, true, &linear);

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)

	rc = linearize(ctxt, addr, size, true, &linear);
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)

	if (rc->pos == rc->end) {
		unsigned int in_page, n;

		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {

	memcpy(dest, rc->data + rc->pos, size);
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)

	if (dt.size < index * 8 + 7)

	addr = dt.address + index * 8;

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)

	if (selector & 1 << 2) {
		/* selector references the LDT */
		struct desc_struct desc;

		memset(dt, 0, sizeof(*dt));

		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	u16 index = selector >> 3;

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)

static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	unsigned err_vec = GP_VECTOR;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */

	memset(&seg_desc, 0, sizeof(seg_desc));

	/* real mode: */
	set_desc_base(&seg_desc, selector << 4);

	/* VM86 needs a clean new segment descriptor: */
	set_desc_base(&seg_desc, selector << 4);
	set_desc_limit(&seg_desc, 0xffff);

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {

	err_code = selector & 0xfffc;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {

	case VCPU_SREG_SS:
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;

	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */

		if (seg_desc.type & 4) {
			/* conforming */
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}

		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;

	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;

	default: /* DS, ES, FS, or GS */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl || cpl > dpl)))
			goto exception;

	/* segment not present */
	err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;

	/* mark segment as accessed */
	if (!(seg_desc.type & 1)) {

	if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
					 ((u64)base3 << 32), ctxt))

	if (seg == VCPU_SREG_TR) {
		old_desc = seg_desc;
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load SS in 64-bit mode.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
	/* push(): */
	struct segmented_address addr;

	addr.seg = VCPU_SREG_SS;

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	struct segmented_address addr;

	addr.seg = VCPU_SREG_SS;
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);
	/* em_enter(): */
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);

	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	/* em_pop_sreg(): */
	unsigned long selector;

	if (seg == VCPU_SREG_SS)

	/* em_pusha(): */
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

	/* em_pushf(): */
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;

	/* em_popa(): */
	int reg = VCPU_REGS_RDI;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
	/* __emulate_int_real(): */
	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;
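	/*
	 * Real-mode IVT layout (illustration): entry "irq" occupies 4 bytes
	 * at IDT base + irq*4; the first word is the handler offset (read
	 * via eip_addr above), the second word the code segment (cs_addr).
	 */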
	/* emulate_int(): */
	switch(ctxt->mode) {
	/* emulate_iret_real(): */
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;
	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;

	/* em_iret(): */
	switch(ctxt->mode) {
	/* em_jmp_far(): */
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	/* em_call_near_abs(): */
	old_eip = ctxt->_eip;
	/* em_cmpxchg8b(): */
	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
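	/*
	 * CMPXCHG8B semantics, as implemented above (illustration): compare
	 * EDX:EAX with the 64-bit destination; if equal, set ZF and store
	 * ECX:EBX into the destination, otherwise clear ZF and load the old
	 * value into EDX:EAX.
	 */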
	/* em_ret_far(): */
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	/* em_cmpxchg(): */
	if (ctxt->eflags & X86_EFLAGS_ZF) {
	/* setup_syscalls_segments(): */
	set_desc_base(cs, 0);		/* flat segment */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */

	set_desc_base(ss, 0);		/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	/* vendor_intel(): */
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);

	/* em_syscall_is_enabled(): */
	u32 eax, ebx, ecx, edx;

	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	struct desc_struct cs, ss;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_STAR, &msr_data);
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);
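	/*
	 * MSR_STAR layout used above (illustration): bits 47:32 hold the
	 * SYSCALL CS selector, and SS is architecturally that selector + 8,
	 * hence the mask and the "+ 8" after shifting the MSR value down by
	 * 32 bits.
	 */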
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	/* em_sysenter(): */
	struct desc_struct cs, ss;

	ops->get_msr(ctxt, MSR_EFER, &efer);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
	if (efer & EFER_LMA)
		ctxt->mode = X86EMUL_MODE_PROT64;
	/* em_sysexit(): */
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	u16 cs_sel = 0, ss_sel = 0;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->mode = usermode;
	/* emulator_bad_iopl(): */
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)
	/* emulator_io_port_access_allowed(): */
	struct desc_struct tr_seg;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);

	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);

	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);

	if ((perm >> bit_idx) & mask)
		return false;
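	/*
	 * TSS I/O permission bitmap check (illustration): the 16-bit field
	 * at offset 102 of the TSS holds the bitmap's offset within the TSS;
	 * each I/O port owns one bit (1 = access denied).  Reading two bytes
	 * at bitmap + port/8 and testing "len" consecutive bits via
	 * (perm >> bit_idx) & mask also covers accesses that straddle a
	 * byte boundary.
	 */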
	/* string_registers_quirk(): */
#ifdef CONFIG_X86_64
	*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;

	*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
#endif

static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	u32 new_tss_base = get_desc_base(new_desc);

	if (old_tss_sel != 0xffff) {

	/* load_state_from_tss32(): */
	if (ctxt->eflags & X86_EFLAGS_VM) {

static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_write_system(ctxt, new_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);

	if (old_tss_sel != 0xffff) {
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	struct desc_struct curr_tss_desc, next_tss_desc;
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	ulong desc_addr, dr7;

	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */

	if (reason == TASK_SWITCH_IRET)

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, old_tss_sel,
				     old_tss_base, &next_tss_desc);

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->src.val = (unsigned long) error_code;
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);
	/* em_das(): */
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;

	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		cf = old_cf | (al >= 250);
	}
	if (old_al > 0x99 || old_cf) {

	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;

	/* em_aad(): */
	u8 ah = (ctxt->dst.val >> 8) & 0xff;
	/* em_call(): */
	long rel = ctxt->src.val;

	/* em_call_far(): */
	struct desc_struct old_desc, new_desc;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	pr_warn_once("faulting far call emulation tainted memory\n");

	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	/* em_imul_3op(): */
	return fastop(ctxt, em_imul);

	/* em_rdpid(): */
	ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);

	/* em_rdtsc(): */
	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;

	/* em_rdpmc(): */
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	/* em_movbe(), 16-bit case: the upper word of the destination
	 * register remains unchanged: */
	tmp = (u16)ctxt->src.val;
	ctxt->dst.val &= ~0xffffUL;
	ctxt->dst.val |= (unsigned long)swab16(tmp);

	/* em_dr_write(): */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;
	/* em_wrmsr(): */
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);

	/* em_rdmsr(): */
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	/* em_store_sreg(): */
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	/* em_invlpg(): */
	unsigned int max_size;
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}

	/* em_lgdt_lidt(): */
	struct desc_ptr desc_ptr;

	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	/* em_smsw(): */
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	/* em_lmsw(): */
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));

	/* em_cli(): */
	ctxt->eflags &= ~X86_EFLAGS_IF;

	/* em_sti(): */
	ctxt->eflags |= X86_EFLAGS_IF;
	/* em_cpuid(): */
	u32 eax, ebx, ecx, edx;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);

	/* em_sahf(): */
	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;

	/* em_lahf(): */
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	/* em_bswap(): */
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	/* check_fxsr(): */
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}
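/*
 * FXSAVE area layout assumed here (illustration): the legacy x87/control
 * state occupies everything up to xmm_space[], which is followed by 16
 * bytes per XMM register; so __fxstate_size(8) covers XMM0-7 (32-bit
 * guests) and __fxstate_size(16) the full XMM0-15 set.
 */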
	/* fxstate_size(): */
	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	/* em_fxsave(): */
	struct fxregs_state fx_state;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}
	/* em_fxrstor(): */
	struct fxregs_state fx_state;

	if (fx_state.mxcsr >> 16) {

	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

	/* em_xsetbv(): */
	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
		return emulate_ud(ctxt);

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);
	/* check_dr7_gd(): */
	return dr7 & DR7_GD;

	/* check_dr_read(): */
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	dr6 &= ~DR_TRAP_BITS;
	dr6 |= DR6_BD | DR6_ACTIVE_LOW;

	/* check_dr_write(): */
	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	/* check_svme(): */
	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	/* check_svme_pa(): */
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	/* check_rdtsc(): */
	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_gp(ctxt, 0);

	/* check_rdpmc(): */
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
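/*
 * Example (illustration): the opcode_table entry "F6ALU(Lock, em_add)"
 * expands to six fastop entries covering the classic ALU opcode block
 * 0x00-0x05: r/m,reg and reg,r/m in byte and word/long forms plus the
 * AL/eAX,imm forms, with Lock permitted only on the memory-destination
 * forms.
 */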
	/* opcode_table[], 0xD8 - 0xDF (x87 escape opcodes): */
	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,

	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N,

	N, N, N, N, N, N, N, N,
	N, N, N, N, N, N, N, N
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)

	switch (op->bytes) {

	if (!sign_extension) {
		switch (op->bytes) {

		op->val &= 0xffffffff;
	/* decode_operand(), OpDI: */
	op->addr.mem.seg = VCPU_SREG_ES;

	/* OpXLat: */
		(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));

	/* OpES..OpGS, immediate segment register numbers: */
	op->val = VCPU_SREG_ES;
	op->val = VCPU_SREG_CS;
	op->val = VCPU_SREG_SS;
	op->val = VCPU_SREG_DS;
	op->val = VCPU_SREG_FS;
	op->val = VCPU_SREG_GS;
	/* x86_decode_insn(): */
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct desc_struct desc;

	memcpy(ctxt->fetch.data, insn, insn_len);

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64

	/* 0x67, address-size override in 64-bit mode: switch 4/8 bytes */
	ctxt->ad_bytes = def_ad_bytes ^ 12;

	/* segment-override prefixes (0x26/0x2e/0x36/0x3e/0x64/0x65): */
	has_seg_override = true;	/* ES */
	has_seg_override = true;	/* CS */
	has_seg_override = true;	/* SS */
	has_seg_override = true;	/* DS */
	has_seg_override = true;	/* FS */
	has_seg_override = true;	/* GS */
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {

	/* 0F_38 opcode map */
	if (ctxt->b == 0x38) {

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0))

	case Group:
		goffset = (ctxt->modrm >> 3) & 7;

	case GroupDual:
		goffset = (ctxt->modrm >> 3) & 7;
		if ((ctxt->modrm >> 6) == 3)

	case RMExt:
		goffset = ctxt->modrm & 7;

	case Prefix:
		simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
		switch (simd_prefix) {

	case Escape:
		if (ctxt->modrm > 0xbf) {
			u32 index = array_index_nospec(
				ctxt->modrm - 0xc0, size);

	case InstrDual:
		if ((ctxt->modrm >> 6) == 3)
	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
	    likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|No16))) {

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;

	/* ModRM and SIB bytes. */
	if (!has_seg_override) {
		has_seg_override = true;
		ctxt->seg_override = ctxt->modrm_seg;
	}

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;
	/* string_insn_completed(): REPE/REPNE termination test */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	/* fetch_possible_mmx_operand(): */
	if (op->type == OP_MM)
		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
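/*
 * The fastop() asm below implies this calling convention (illustration):
 * dst.val travels in rax, src.val in rdx, src2.val in rcx, the guest flags
 * in rdi, and the stub address in rsi (thunk_target).  On return, a NULL
 * fop signals that the stub faulted (e.g. a divide error) and the exception
 * fixup zeroed %rsi.
 */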
	/* fastop(): */
	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));
	/* x86_emulate_insn(): */
	int saved_dst_type = ctxt->dst.type;

	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {

		if (ctxt->d & Mmx) {

			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {

	ctxt->eflags &= ~X86_EFLAGS_RF;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	goto threebyte_insn;

	case 0xce:		/* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);

	case 0xf5:		/* cmc */
		ctxt->eflags ^= X86_EFLAGS_CF;

	case 0xf8:		/* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;

	case 0xf9:		/* stc */
		ctxt->eflags |= X86_EFLAGS_CF;

	case 0xfc:		/* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;

	case 0xfd:		/* std */
		ctxt->eflags |= X86_EFLAGS_DF;

	goto cannot_emulate;

	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);

	/* restore dst type in case the decoding will be reused
	   (happens for string instruction) */
	ctxt->dst.type = saved_dst_type;

	if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
	    (r->end == 0 || r->end != r->pos)) {

	ctxt->eflags &= ~X86_EFLAGS_RF;

	goto cannot_emulate;