emulate.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3  * emulate.c
4  *
5  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6  *
7  * Copyright (c) 2005 Keir Fraser
8  *
9  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
10  * privileged instructions:
11  *
12  * Copyright (C) 2006 Qumranet
13  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14  *
15  * Avi Kivity <avi@qumranet.com>
16  * Yaniv Kamay <yaniv@qumranet.com>
17  *
18  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
19  */
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 
22 #include <linux/kvm_host.h>
23 #include "kvm_cache_regs.h"
24 #include "kvm_emulate.h"
25 #include <linux/stringify.h>
26 #include <asm/debugreg.h>
27 #include <asm/nospec-branch.h>
28 #include <asm/ibt.h>
29 
30 #include "x86.h"
31 #include "tss.h"
32 #include "mmu.h"
33 #include "pmu.h"
34 
35 /*
36  * Operand types
37  */
38 #define OpNone 0ull
39 #define OpImplicit 1ull /* No generic decode */
40 #define OpReg 2ull /* Register */
41 #define OpMem 3ull /* Memory */
42 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
43 #define OpDI 5ull /* ES:DI/EDI/RDI */
44 #define OpMem64 6ull /* Memory, 64-bit */
45 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
46 #define OpDX 8ull /* DX register */
47 #define OpCL 9ull /* CL register (for shifts) */
48 #define OpImmByte 10ull /* 8-bit sign extended immediate */
49 #define OpOne 11ull /* Implied 1 */
50 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
51 #define OpMem16 13ull /* Memory operand (16-bit). */
52 #define OpMem32 14ull /* Memory operand (32-bit). */
53 #define OpImmU 15ull /* Immediate operand, zero extended */
54 #define OpSI 16ull /* SI/ESI/RSI */
55 #define OpImmFAddr 17ull /* Immediate far address */
56 #define OpMemFAddr 18ull /* Far address in memory */
57 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
58 #define OpES 20ull /* ES */
59 #define OpCS 21ull /* CS */
60 #define OpSS 22ull /* SS */
61 #define OpDS 23ull /* DS */
62 #define OpFS 24ull /* FS */
63 #define OpGS 25ull /* GS */
64 #define OpMem8 26ull /* 8-bit zero extended memory operand */
65 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
66 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
67 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
68 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
69 
70 #define OpBits 5 /* Width of operand field */
71 #define OpMask ((1ull << OpBits) - 1)
72 
73 /*
74  * Opcode effective-address decode tables.
75  * Note that we only emulate instructions that have at least one memory
76  * operand (excluding implicit stack references). We assume that stack
77  * references and instruction fetches will never occur in special memory
78  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
79  * not be handled.
80  */
81 
82 /* Operand sizes: 8-bit operands or specified/overridden size. */
83 #define ByteOp (1<<0) /* 8-bit operands. */
84 /* Destination operand type. */
85 #define DstShift 1
86 #define ImplicitOps (OpImplicit << DstShift)
87 #define DstReg (OpReg << DstShift)
88 #define DstMem (OpMem << DstShift)
89 #define DstAcc (OpAcc << DstShift)
90 #define DstDI (OpDI << DstShift)
91 #define DstMem64 (OpMem64 << DstShift)
92 #define DstMem16 (OpMem16 << DstShift)
93 #define DstImmUByte (OpImmUByte << DstShift)
94 #define DstDX (OpDX << DstShift)
95 #define DstAccLo (OpAccLo << DstShift)
96 #define DstMask (OpMask << DstShift)
97 /* Source operand type. */
98 #define SrcShift 6
99 #define SrcNone (OpNone << SrcShift)
100 #define SrcReg (OpReg << SrcShift)
101 #define SrcMem (OpMem << SrcShift)
102 #define SrcMem16 (OpMem16 << SrcShift)
103 #define SrcMem32 (OpMem32 << SrcShift)
104 #define SrcImm (OpImm << SrcShift)
105 #define SrcImmByte (OpImmByte << SrcShift)
106 #define SrcOne (OpOne << SrcShift)
107 #define SrcImmUByte (OpImmUByte << SrcShift)
108 #define SrcImmU (OpImmU << SrcShift)
109 #define SrcSI (OpSI << SrcShift)
110 #define SrcXLat (OpXLat << SrcShift)
111 #define SrcImmFAddr (OpImmFAddr << SrcShift)
112 #define SrcMemFAddr (OpMemFAddr << SrcShift)
113 #define SrcAcc (OpAcc << SrcShift)
114 #define SrcImmU16 (OpImmU16 << SrcShift)
115 #define SrcImm64 (OpImm64 << SrcShift)
116 #define SrcDX (OpDX << SrcShift)
117 #define SrcMem8 (OpMem8 << SrcShift)
118 #define SrcAccHi (OpAccHi << SrcShift)
119 #define SrcMask (OpMask << SrcShift)
120 #define BitOp (1<<11)
121 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
122 #define String (1<<13) /* String instruction (rep capable) */
123 #define Stack (1<<14) /* Stack instruction (push/pop) */
124 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
125 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
126 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
127 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
128 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
129 #define Escape (5<<15) /* Escape to coprocessor instruction */
130 #define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
131 #define ModeDual (7<<15) /* Different instruction for 32/64 bit */
132 #define Sse (1<<18) /* SSE Vector instruction */
133 /* Generic ModRM decode. */
134 #define ModRM (1<<19)
135 /* Destination is only written; never read. */
136 #define Mov (1<<20)
137 /* Misc flags */
138 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
139 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
140 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
141 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
142 #define Undefined (1<<25) /* No Such Instruction */
143 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
144 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
145 #define No64 (1<<28)
146 #define PageTable (1 << 29) /* instruction used to write page table */
147 #define NotImpl (1 << 30) /* instruction is not implemented */
148 /* Source 2 operand type */
149 #define Src2Shift (31)
150 #define Src2None (OpNone << Src2Shift)
151 #define Src2Mem (OpMem << Src2Shift)
152 #define Src2CL (OpCL << Src2Shift)
153 #define Src2ImmByte (OpImmByte << Src2Shift)
154 #define Src2One (OpOne << Src2Shift)
155 #define Src2Imm (OpImm << Src2Shift)
156 #define Src2ES (OpES << Src2Shift)
157 #define Src2CS (OpCS << Src2Shift)
158 #define Src2SS (OpSS << Src2Shift)
159 #define Src2DS (OpDS << Src2Shift)
160 #define Src2FS (OpFS << Src2Shift)
161 #define Src2GS (OpGS << Src2Shift)
162 #define Src2Mask (OpMask << Src2Shift)
163 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
164 #define AlignMask ((u64)7 << 41)
165 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
166 #define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
167 #define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
168 #define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
169 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
170 #define NoWrite ((u64)1 << 45) /* No writeback */
171 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
172 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
173 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
174 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
175 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
176 #define NearBranch ((u64)1 << 52) /* Near branches */
177 #define No16 ((u64)1 << 53) /* No 16 bit operand */
178 #define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
179 #define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */
180 #define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */
181 
182 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
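Each decode-table entry packs its operand descriptors into a single 64-bit flags word: bits 1-5 hold the destination type, bits 6-10 the source type, and bits 31-35 the second source, each OpBits wide. A minimal sketch of unpacking them (the helper names are illustrative, not part of the file):

	static inline u64 op_dst(u64 flags)  { return (flags >> DstShift)  & OpMask; }
	static inline u64 op_src(u64 flags)  { return (flags >> SrcShift)  & OpMask; }
	static inline u64 op_src2(u64 flags) { return (flags >> Src2Shift) & OpMask; }

	/* e.g. op_src(DstReg | SrcMem | ModRM) == OpMem */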
183 
184 #define X2(x...) x, x
185 #define X3(x...) X2(x), x
186 #define X4(x...) X2(x), X2(x)
187 #define X5(x...) X4(x), x
188 #define X6(x...) X4(x), X2(x)
189 #define X7(x...) X4(x), X3(x)
190 #define X8(x...) X4(x), X4(x)
191 #define X16(x...) X8(x), X8(x)
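The X* macros simply repeat their argument, letting the opcode tables spell out runs of identical entries once; a quick sketch of the expansion:

	int v[] = { X4(1), X2(7) };	/* => { 1, 1, 1, 1, 7, 7 } */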
192 
193 struct opcode {
194  u64 flags;
195  u8 intercept;
196  u8 pad[7];
197  union {
198  int (*execute)(struct x86_emulate_ctxt *ctxt);
199  const struct opcode *group;
200  const struct group_dual *gdual;
201  const struct gprefix *gprefix;
202  const struct escape *esc;
203  const struct instr_dual *idual;
204  const struct mode_dual *mdual;
205  void (*fastop)(struct fastop *fake);
206  } u;
207  int (*check_perm)(struct x86_emulate_ctxt *ctxt);
208 };
209 
210 struct group_dual {
211  struct opcode mod012[8];
212  struct opcode mod3[8];
213 };
214 
215 struct gprefix {
216  struct opcode pfx_no;
217  struct opcode pfx_66;
218  struct opcode pfx_f2;
219  struct opcode pfx_f3;
220 };
221 
222 struct escape {
223  struct opcode op[8];
224  struct opcode high[64];
225 };
226 
227 struct instr_dual {
228  struct opcode mod012;
229  struct opcode mod3;
230 };
231 
232 struct mode_dual {
233  struct opcode mode32;
234  struct opcode mode64;
235 };
236 
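A sketch of how one of these indirections would be resolved, taking the GroupDual case (mod picks the table, ModRM bits 5:3 pick the entry, per the flag comments above); the real lookup happens in x86_decode_insn, outside this excerpt, so the helper name here is illustrative:

	static const struct opcode *resolve_group_dual(const struct opcode *entry,
						       u8 modrm)
	{
		const struct group_dual *gd = entry->u.gdual;
		int reg = (modrm >> 3) & 7;	/* opcode-extension field */

		/* mod == 3 means register operands: use the mod3 table */
		return ((modrm >> 6) == 3) ? &gd->mod3[reg] : &gd->mod012[reg];
	}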
237 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
238 
239 enum x86_transfer_type {
240  X86_TRANSFER_NONE,
241  X86_TRANSFER_CALL_JMP,
242  X86_TRANSFER_RET,
243  X86_TRANSFER_TASK_SWITCH,
244 };
245 
246 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
247 {
248  unsigned long dirty = ctxt->regs_dirty;
249  unsigned reg;
250 
251  for_each_set_bit(reg, &dirty, NR_EMULATOR_GPRS)
252  ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
253 }
254 
255 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
256 {
257  ctxt->regs_dirty = 0;
258  ctxt->regs_valid = 0;
259 }
260 
261 /*
262  * These EFLAGS bits are restored from saved value during emulation, and
263  * any changes are written back to the saved value after emulation.
264  */
265 #define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
266  X86_EFLAGS_PF|X86_EFLAGS_CF)
267 
268 #ifdef CONFIG_X86_64
269 #define ON64(x) x
270 #else
271 #define ON64(x)
272 #endif
273 
274 /*
275  * fastop functions have a special calling convention:
276  *
277  * dst: rax (in/out)
278  * src: rdx (in/out)
279  * src2: rcx (in)
280  * flags: rflags (in/out)
281  * ex: rsi (in:fastop pointer, out:zero if exception)
282  *
283  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
284  * different operand sizes can be reached by calculation, rather than a jump
285  * table (which would be bigger than the code).
286  *
287  * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
288  * and 1 for the straight line speculation INT3, leaves 7 bytes for the
289  * body of the function. Currently none is larger than 4.
290  */
291 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
292 
293 #define FASTOP_SIZE 16
294 
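Because the bodies are laid out in size order (byte, word, dword, qword) at FASTOP_SIZE strides, the dispatcher can turn an operand size into an entry address with arithmetic alone; a hedged sketch of that calculation (the real fastop(), defined later in the file, applies the offset before calling into the body from inline asm):

	/* bytes is a power of two in {1, 2, 4, 8}, so __ffs() yields log2 */
	static void *fastop_body(void *em_entry, unsigned int bytes)
	{
		return em_entry + __ffs(bytes) * FASTOP_SIZE;
	}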
295 #define __FOP_FUNC(name) \
296  ".align " __stringify(FASTOP_SIZE) " \n\t" \
297  ".type " name ", @function \n\t" \
298  name ":\n\t" \
299  ASM_ENDBR \
300  IBT_NOSEAL(name)
301 
302 #define FOP_FUNC(name) \
303  __FOP_FUNC(#name)
304 
305 #define __FOP_RET(name) \
306  "11: " ASM_RET \
307  ".size " name ", .-" name "\n\t"
308 
309 #define FOP_RET(name) \
310  __FOP_RET(#name)
311 
312 #define __FOP_START(op, align) \
313  extern void em_##op(struct fastop *fake); \
314  asm(".pushsection .text, \"ax\" \n\t" \
315  ".global em_" #op " \n\t" \
316  ".align " __stringify(align) " \n\t" \
317  "em_" #op ":\n\t"
318 
319 #define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
320 
321 #define FOP_END \
322  ".popsection")
323 
324 #define __FOPNOP(name) \
325  __FOP_FUNC(name) \
326  __FOP_RET(name)
327 
328 #define FOPNOP() \
329  __FOPNOP(__stringify(__UNIQUE_ID(nop)))
330 
331 #define FOP1E(op, dst) \
332  __FOP_FUNC(#op "_" #dst) \
333  "10: " #op " %" #dst " \n\t" \
334  __FOP_RET(#op "_" #dst)
335 
336 #define FOP1EEX(op, dst) \
337  FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
338 
339 #define FASTOP1(op) \
340  FOP_START(op) \
341  FOP1E(op##b, al) \
342  FOP1E(op##w, ax) \
343  FOP1E(op##l, eax) \
344  ON64(FOP1E(op##q, rax)) \
345  FOP_END
346 
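FASTOP1(not), instantiated further down, therefore emits four one-instruction bodies at 16-byte strides, roughly:

	/* em_not +  0: notb %al ; ret           */
	/* em_not + 16: notw %ax ; ret           */
	/* em_not + 32: notl %eax; ret           */
	/* em_not + 48: notq %rax; ret (64-bit)  */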
347 /* 1-operand, using src2 (for MUL/DIV r/m) */
348 #define FASTOP1SRC2(op, name) \
349  FOP_START(name) \
350  FOP1E(op, cl) \
351  FOP1E(op, cx) \
352  FOP1E(op, ecx) \
353  ON64(FOP1E(op, rcx)) \
354  FOP_END
355 
356 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
357 #define FASTOP1SRC2EX(op, name) \
358  FOP_START(name) \
359  FOP1EEX(op, cl) \
360  FOP1EEX(op, cx) \
361  FOP1EEX(op, ecx) \
362  ON64(FOP1EEX(op, rcx)) \
363  FOP_END
364 
365 #define FOP2E(op, dst, src) \
366  __FOP_FUNC(#op "_" #dst "_" #src) \
367  #op " %" #src ", %" #dst " \n\t" \
368  __FOP_RET(#op "_" #dst "_" #src)
369 
370 #define FASTOP2(op) \
371  FOP_START(op) \
372  FOP2E(op##b, al, dl) \
373  FOP2E(op##w, ax, dx) \
374  FOP2E(op##l, eax, edx) \
375  ON64(FOP2E(op##q, rax, rdx)) \
376  FOP_END
377 
378 /* 2 operand, word only */
379 #define FASTOP2W(op) \
380  FOP_START(op) \
381  FOPNOP() \
382  FOP2E(op##w, ax, dx) \
383  FOP2E(op##l, eax, edx) \
384  ON64(FOP2E(op##q, rax, rdx)) \
385  FOP_END
386 
387 /* 2 operand, src is CL */
388 #define FASTOP2CL(op) \
389  FOP_START(op) \
390  FOP2E(op##b, al, cl) \
391  FOP2E(op##w, ax, cl) \
392  FOP2E(op##l, eax, cl) \
393  ON64(FOP2E(op##q, rax, cl)) \
394  FOP_END
395 
396 /* 2 operand, src and dest are reversed */
397 #define FASTOP2R(op, name) \
398  FOP_START(name) \
399  FOP2E(op##b, dl, al) \
400  FOP2E(op##w, dx, ax) \
401  FOP2E(op##l, edx, eax) \
402  ON64(FOP2E(op##q, rdx, rax)) \
403  FOP_END
404 
405 #define FOP3E(op, dst, src, src2) \
406  __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
407  #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
408  __FOP_RET(#op "_" #dst "_" #src "_" #src2)
409 
410 /* 3-operand, word-only, src2=cl */
411 #define FASTOP3WCL(op) \
412  FOP_START(op) \
413  FOPNOP() \
414  FOP3E(op##w, ax, dx, cl) \
415  FOP3E(op##l, eax, edx, cl) \
416  ON64(FOP3E(op##q, rax, rdx, cl)) \
417  FOP_END
418 
419 /* Special case for SETcc - 1 instruction per cc */
420 #define FOP_SETCC(op) \
421  FOP_FUNC(op) \
422  #op " %al \n\t" \
423  FOP_RET(op)
424 
425 FOP_START(setcc)
426 FOP_SETCC(seto)
427 FOP_SETCC(setno)
428 FOP_SETCC(setc)
429 FOP_SETCC(setnc)
430 FOP_SETCC(setz)
431 FOP_SETCC(setnz)
432 FOP_SETCC(setbe)
433 FOP_SETCC(setnbe)
434 FOP_SETCC(sets)
435 FOP_SETCC(setns)
436 FOP_SETCC(setp)
437 FOP_SETCC(setnp)
438 FOP_SETCC(setl)
439 FOP_SETCC(setnl)
440 FOP_SETCC(setle)
441 FOP_SETCC(setnle)
442 FOP_END;
443 
444 FOP_START(salc)
445 FOP_FUNC(salc)
446 "pushf; sbb %al, %al; popf \n\t"
447 FOP_RET(salc)
448 FOP_END;
449 
450 /*
451  * XXX: inoutclob user must know where the argument is being expanded.
452  * Using asm goto would allow us to remove _fault.
453  */
454 #define asm_safe(insn, inoutclob...) \
455 ({ \
456  int _fault = 0; \
457  \
458  asm volatile("1:" insn "\n" \
459  "2:\n" \
460  _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
461  : [_fault] "+r"(_fault) inoutclob ); \
462  \
463  _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
464 })
465 
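A typical use wraps a single instruction that may fault and propagates the result; the full file uses it this way for FWAIT (shown here as an assumed example, with rc declared by the caller):

	rc = asm_safe("fwait");
	if (rc != X86EMUL_CONTINUE)
		return rc;	/* X86EMUL_UNHANDLEABLE if the insn faulted */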
466 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
467  enum x86_intercept intercept,
468  enum x86_intercept_stage stage)
469 {
470  struct x86_instruction_info info = {
471  .intercept = intercept,
472  .rep_prefix = ctxt->rep_prefix,
473  .modrm_mod = ctxt->modrm_mod,
474  .modrm_reg = ctxt->modrm_reg,
475  .modrm_rm = ctxt->modrm_rm,
476  .src_val = ctxt->src.val64,
477  .dst_val = ctxt->dst.val64,
478  .src_bytes = ctxt->src.bytes,
479  .dst_bytes = ctxt->dst.bytes,
480  .ad_bytes = ctxt->ad_bytes,
481  .next_rip = ctxt->eip,
482  };
483 
484  return ctxt->ops->intercept(ctxt, &info, stage);
485 }
486 
487 static void assign_masked(ulong *dest, ulong src, ulong mask)
488 {
489  *dest = (*dest & ~mask) | (src & mask);
490 }
491 
492 static void assign_register(unsigned long *reg, u64 val, int bytes)
493 {
494  /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
495  switch (bytes) {
496  case 1:
497  *(u8 *)reg = (u8)val;
498  break;
499  case 2:
500  *(u16 *)reg = (u16)val;
501  break;
502  case 4:
503  *reg = (u32)val;
504  break; /* 64b: zero-extend */
505  case 8:
506  *reg = val;
507  break;
508  }
509 }
510 
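A worked example of the zero-extension rule above: starting from a register of all ones, a 4-byte write clears the upper half while narrower writes leave it alone, matching 64-bit hardware:

	unsigned long reg = ~0UL;
	assign_register(&reg, 0x12345678, 4);	/* reg == 0x0000000012345678 */
	assign_register(&reg, 0xaa, 1);		/* reg == 0x00000000123456aa */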
511 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
512 {
513  return (1UL << (ctxt->ad_bytes << 3)) - 1; /* ad_bytes 2 -> 0xffff, 4 -> 0xffffffff */
514 }
515 
516 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
517 {
518  u16 sel;
519  struct desc_struct ss;
520 
521  if (ctxt->mode == X86EMUL_MODE_PROT64)
522  return ~0UL;
523  ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
524  return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
525 }
526 
527 static int stack_size(struct x86_emulate_ctxt *ctxt)
528 {
529  return (__fls(stack_mask(ctxt)) + 1) >> 3;
530 }
531 
532 /* Access/update address held in a register, based on addressing mode. */
533 static inline unsigned long
534 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
535 {
536  if (ctxt->ad_bytes == sizeof(unsigned long))
537  return reg;
538  else
539  return reg & ad_mask(ctxt);
540 }
541 
542 static inline unsigned long
543 register_address(struct x86_emulate_ctxt *ctxt, int reg)
544 {
545  return address_mask(ctxt, reg_read(ctxt, reg));
546 }
547 
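For instance, with a 16-bit address size (ad_bytes == 2) and RSI holding 0x1ffff, register_address() masks with ad_mask() == 0xffff and yields 0xffff; the raw register value is used only when ad_bytes matches sizeof(unsigned long).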
548 static void masked_increment(ulong *reg, ulong mask, int inc)
549 {
550  assign_masked(reg, *reg + inc, mask);
551 }
552 
553 static inline void
554 register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
555 {
556  ulong *preg = reg_rmw(ctxt, reg);
557 
558  assign_register(preg, *preg + inc, ctxt->ad_bytes);
559 }
560 
561 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
562 {
563  masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
564 }
565 
566 static u32 desc_limit_scaled(struct desc_struct *desc)
567 {
568  u32 limit = get_desc_limit(desc);
569 
570  return desc->g ? (limit << 12) | 0xfff : limit; /* g=1: limit counts 4KiB pages */
571 }
572 
573 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
574 {
575  if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
576  return 0;
577 
578  return ctxt->ops->get_cached_segment_base(ctxt, seg);
579 }
580 
581 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
582  u32 error, bool valid)
583 {
584  if (KVM_EMULATOR_BUG_ON(vec > 0x1f, ctxt))
585  return X86EMUL_UNHANDLEABLE;
586 
587  ctxt->exception.vector = vec;
588  ctxt->exception.error_code = error;
589  ctxt->exception.error_code_valid = valid;
590  return X86EMUL_PROPAGATE_FAULT;
591 }
592 
593 static int emulate_db(struct x86_emulate_ctxt *ctxt)
594 {
595  return emulate_exception(ctxt, DB_VECTOR, 0, false);
596 }
597 
598 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
599 {
600  return emulate_exception(ctxt, GP_VECTOR, err, true);
601 }
602 
603 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
604 {
605  return emulate_exception(ctxt, SS_VECTOR, err, true);
606 }
607 
608 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
609 {
610  return emulate_exception(ctxt, UD_VECTOR, 0, false);
611 }
612 
613 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
614 {
615  return emulate_exception(ctxt, TS_VECTOR, err, true);
616 }
617 
618 static int emulate_de(struct x86_emulate_ctxt *ctxt)
619 {
620  return emulate_exception(ctxt, DE_VECTOR, 0, false);
621 }
622 
623 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
624 {
625  return emulate_exception(ctxt, NM_VECTOR, 0, false);
626 }
627 
628 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
629 {
630  u16 selector;
631  struct desc_struct desc;
632 
633  ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
634  return selector;
635 }
636 
637 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
638  unsigned seg)
639 {
640  u16 dummy;
641  u32 base3;
642  struct desc_struct desc;
643 
644  ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
645  ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
646 }
647 
648 static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
649 {
650  return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
651 }
652 
653 static inline bool emul_is_noncanonical_address(u64 la,
654  struct x86_emulate_ctxt *ctxt)
655 {
656  return !__is_canonical_address(la, ctxt_virt_addr_bits(ctxt));
657 }
658 
659 /*
660  * x86 defines three classes of vector instructions: explicitly
661  * aligned, explicitly unaligned, and the rest, which change behaviour
662  * depending on whether they're AVX encoded or not.
663  *
664  * Also included is CMPXCHG16B which is not a vector instruction, yet it is
665  * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
666  * 512 bytes of data must be aligned to a 16 byte boundary.
667  */
668 static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
669 {
670  u64 alignment = ctxt->d & AlignMask;
671 
672  if (likely(size < 16))
673  return 1;
674 
675  switch (alignment) {
676  case Unaligned:
677  case Avx:
678  return 1;
679  case Aligned16:
680  return 16;
681  case Aligned:
682  default:
683  return size;
684  }
685 }
686 
687 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
688  struct segmented_address addr,
689  unsigned *max_size, unsigned size,
690  enum x86emul_mode mode, ulong *linear,
691  unsigned int flags)
692 {
693  struct desc_struct desc;
694  bool usable;
695  ulong la;
696  u32 lim;
697  u16 sel;
698  u8 va_bits;
699 
700  la = seg_base(ctxt, addr.seg) + addr.ea;
701  *max_size = 0;
702  switch (mode) {
703  case X86EMUL_MODE_PROT64:
704  *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags);
705  va_bits = ctxt_virt_addr_bits(ctxt);
706  if (!__is_canonical_address(la, va_bits))
707  goto bad;
708 
709  *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
710  if (size > *max_size)
711  goto bad;
712  break;
713  default:
714  *linear = la = (u32)la;
715  usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
716  addr.seg);
717  if (!usable)
718  goto bad;
719  /* code segment in protected mode or read-only data segment */
720  if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
721  (flags & X86EMUL_F_WRITE))
722  goto bad;
723  /* unreadable code segment */
724  if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
725  goto bad;
726  lim = desc_limit_scaled(&desc);
727  if (!(desc.type & 8) && (desc.type & 4)) {
728  /* expand-down segment */
729  if (addr.ea <= lim)
730  goto bad;
731  lim = desc.d ? 0xffffffff : 0xffff;
732  }
733  if (addr.ea > lim)
734  goto bad;
735  if (lim == 0xffffffff)
736  *max_size = ~0u;
737  else {
738  *max_size = (u64)lim + 1 - addr.ea;
739  if (size > *max_size)
740  goto bad;
741  }
742  break;
743  }
744  if (la & (insn_alignment(ctxt, size) - 1))
745  return emulate_gp(ctxt, 0);
746  return X86EMUL_CONTINUE;
747 bad:
748  if (addr.seg == VCPU_SREG_SS)
749  return emulate_ss(ctxt, 0);
750  else
751  return emulate_gp(ctxt, 0);
752 }
753 
754 static int linearize(struct x86_emulate_ctxt *ctxt,
755  struct segmented_address addr,
756  unsigned size, bool write,
757  ulong *linear)
758 {
759  unsigned max_size;
760  return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
761  write ? X86EMUL_F_WRITE : 0);
762 }
763 
764 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
765 {
766  ulong linear;
767  int rc;
768  unsigned max_size;
769  struct segmented_address addr = { .seg = VCPU_SREG_CS,
770  .ea = dst };
771 
772  if (ctxt->op_bytes != sizeof(unsigned long))
773  addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
774  rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
775  X86EMUL_F_FETCH);
776  if (rc == X86EMUL_CONTINUE)
777  ctxt->_eip = addr.ea;
778  return rc;
779 }
780 
781 static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
782 {
783  u64 efer;
784  struct desc_struct cs;
785  u16 selector;
786  u32 base3;
787 
788  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
789 
790  if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
791  /* Real mode. cpu must not have long mode active */
792  if (efer & EFER_LMA)
793  return X86EMUL_UNHANDLEABLE;
794  ctxt->mode = X86EMUL_MODE_REAL;
795  return X86EMUL_CONTINUE;
796  }
797 
798  if (ctxt->eflags & X86_EFLAGS_VM) {
799  /* Protected/VM86 mode. cpu must not have long mode active */
800  if (efer & EFER_LMA)
801  return X86EMUL_UNHANDLEABLE;
802  ctxt->mode = X86EMUL_MODE_VM86;
803  return X86EMUL_CONTINUE;
804  }
805 
806  if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
807  return X86EMUL_UNHANDLEABLE;
808 
809  if (efer & EFER_LMA) {
810  if (cs.l) {
811  /* Proper long mode */
812  ctxt->mode = X86EMUL_MODE_PROT64;
813  } else if (cs.d) {
814  /* 32 bit compatibility mode */
815  ctxt->mode = X86EMUL_MODE_PROT32;
816  } else {
817  ctxt->mode = X86EMUL_MODE_PROT16;
818  }
819  } else {
820  /* Legacy 32 bit / 16 bit mode */
821  ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
822  }
823 
824  return X86EMUL_CONTINUE;
825 }
826 
827 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
828 {
829  return assign_eip(ctxt, dst);
830 }
831 
832 static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
833 {
834  int rc = emulator_recalc_and_set_mode(ctxt);
835 
836  if (rc != X86EMUL_CONTINUE)
837  return rc;
838 
839  return assign_eip(ctxt, dst);
840 }
841 
842 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
843 {
844  return assign_eip_near(ctxt, ctxt->_eip + rel);
845 }
846 
847 static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
848  void *data, unsigned size)
849 {
850  return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
851 }
852 
853 static int linear_write_system(struct x86_emulate_ctxt *ctxt,
854  ulong linear, void *data,
855  unsigned int size)
856 {
857  return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
858 }
859 
860 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
861  struct segmented_address addr,
862  void *data,
863  unsigned size)
864 {
865  int rc;
866  ulong linear;
867 
868  rc = linearize(ctxt, addr, size, false, &linear);
869  if (rc != X86EMUL_CONTINUE)
870  return rc;
871  return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
872 }
873 
874 static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
875  struct segmented_address addr,
876  void *data,
877  unsigned int size)
878 {
879  int rc;
880  ulong linear;
881 
882  rc = linearize(ctxt, addr, size, true, &linear);
883  if (rc != X86EMUL_CONTINUE)
884  return rc;
885  return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
886 }
887 
888 /*
889  * Prefetch the remaining bytes of the instruction without crossing page
890  * boundary if they are not in fetch_cache yet.
891  */
892 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
893 {
894  int rc;
895  unsigned size, max_size;
896  unsigned long linear;
897  int cur_size = ctxt->fetch.end - ctxt->fetch.data;
898  struct segmented_address addr = { .seg = VCPU_SREG_CS,
899  .ea = ctxt->eip + cur_size };
900 
901  /*
902  * We do not know exactly how many bytes will be needed, and
903  * __linearize is expensive, so fetch as much as possible. We
904  * just have to avoid going beyond the 15 byte limit, the end
905  * of the segment, or the end of the page.
906  *
907  * __linearize is called with size 0 so that it does not do any
908  * boundary check itself. Instead, we use max_size to check
909  * against op_size.
910  */
911  rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
912  X86EMUL_F_FETCH);
913  if (unlikely(rc != X86EMUL_CONTINUE))
914  return rc;
915 
916  size = min_t(unsigned, 15UL ^ cur_size, max_size); /* 15 ^ cur_size == 15 - cur_size here */
917  size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
918 
919  /*
920  * One instruction can only straddle two pages,
921  * and one has been loaded at the beginning of
922  * x86_decode_insn. So, if we still do not have
923  * enough bytes, we must have hit the 15-byte limit.
924  */
925  if (unlikely(size < op_size))
926  return emulate_gp(ctxt, 0);
927 
928  rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
929  size, &ctxt->exception);
930  if (unlikely(rc != X86EMUL_CONTINUE))
931  return rc;
932  ctxt->fetch.end += size;
933  return X86EMUL_CONTINUE;
934 }
935 
936 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
937  unsigned size)
938 {
939  unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
940 
941  if (unlikely(done_size < size))
942  return __do_insn_fetch_bytes(ctxt, size - done_size);
943  else
944  return X86EMUL_CONTINUE;
945 }
946 
947 /* Fetch next part of the instruction being emulated. */
948 #define insn_fetch(_type, _ctxt) \
949 ({ _type _x; \
950  \
951  rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
952  if (rc != X86EMUL_CONTINUE) \
953  goto done; \
954  ctxt->_eip += sizeof(_type); \
955  memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
956  ctxt->fetch.ptr += sizeof(_type); \
957  _x; \
958 })
959 
960 #define insn_fetch_arr(_arr, _size, _ctxt) \
961 ({ \
962  rc = do_insn_fetch_bytes(_ctxt, _size); \
963  if (rc != X86EMUL_CONTINUE) \
964  goto done; \
965  ctxt->_eip += (_size); \
966  memcpy(_arr, ctxt->fetch.ptr, _size); \
967  ctxt->fetch.ptr += (_size); \
968 })
969 
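Both fetch macros expand to statement expressions that rely on an int rc variable and a done: label in the calling function; the decode helpers below (decode_modrm, decode_abs) declare exactly that pair, so a failed fetch bails out of decode with the right return code.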
970 /*
971  * Given the 'reg' portion of a ModRM byte, and a register block, return a
972  * pointer into the block that addresses the relevant register.
973  * @byteop selects a byte register; with no REX prefix, 4..7 decode to AH,CH,DH,BH.
974  */
975 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
976  int byteop)
977 {
978  void *p;
979  int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
980 
981  if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
982  p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
983  else
984  p = reg_rmw(ctxt, modrm_reg);
985  return p;
986 }
987 
988 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
989  struct segmented_address addr,
990  u16 *size, unsigned long *address, int op_bytes)
991 {
992  int rc;
993 
994  if (op_bytes == 2)
995  op_bytes = 3; /* a 16-bit operand size reads only 24 bits of the base */
996  *address = 0;
997  rc = segmented_read_std(ctxt, addr, size, 2);
998  if (rc != X86EMUL_CONTINUE)
999  return rc;
1000  addr.ea += 2;
1001  rc = segmented_read_std(ctxt, addr, address, op_bytes);
1002  return rc;
1003 }
1004 
1005 FASTOP2(add);
1006 FASTOP2(or);
1007 FASTOP2(adc);
1008 FASTOP2(sbb);
1009 FASTOP2(and);
1010 FASTOP2(sub);
1011 FASTOP2(xor);
1012 FASTOP2(cmp);
1013 FASTOP2(test);
1014 
1015 FASTOP1SRC2(mul, mul_ex);
1016 FASTOP1SRC2(imul, imul_ex);
1017 FASTOP1SRC2EX(div, div_ex);
1018 FASTOP1SRC2EX(idiv, idiv_ex);
1019 
1019 
1020 FASTOP3WCL(shld);
1021 FASTOP3WCL(shrd);
1022 
1023 FASTOP2W(imul);
1024 
1025 FASTOP1(not);
1026 FASTOP1(neg);
1027 FASTOP1(inc);
1028 FASTOP1(dec);
1029 
1030 FASTOP2CL(rol);
1031 FASTOP2CL(ror);
1032 FASTOP2CL(rcl);
1033 FASTOP2CL(rcr);
1034 FASTOP2CL(shl);
1035 FASTOP2CL(shr);
1036 FASTOP2CL(sar);
1037 
1038 FASTOP2W(bsf);
1039 FASTOP2W(bsr);
1040 FASTOP2W(bt);
1041 FASTOP2W(bts);
1042 FASTOP2W(btr);
1043 FASTOP2W(btc);
1044 
1045 FASTOP2(xadd);
1046 
1047 FASTOP2R(cmp, cmp_r);
1048 
1049 static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
1050 {
1051  /* If src is zero, do not writeback, but update flags */
1052  if (ctxt->src.val == 0)
1053  ctxt->dst.type = OP_NONE;
1054  return fastop(ctxt, em_bsf);
1055 }
1056 
1057 static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
1058 {
1059  /* If src is zero, do not writeback, but update flags */
1060  if (ctxt->src.val == 0)
1061  ctxt->dst.type = OP_NONE;
1062  return fastop(ctxt, em_bsr);
1063 }
1064 
1065 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
1066 {
1067  u8 rc;
1068  void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);
1069 
1070  flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
1071  asm("push %[flags]; popf; " CALL_NOSPEC
1072  : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
1073  return rc;
1074 }
1075 
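The SETcc bodies above sit in condition-code order at FASTOP_SIZE strides, so the low nibble of a Jcc/SETcc opcode indexes straight into them. A worked example with assumed values:

	/* JE is opcode 0x74; nibble 0x4 selects setz, returning ZF as 0/1 */
	u8 taken = test_cc(0x74, ctxt->eflags);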
1076 static void fetch_register_operand(struct operand *op)
1077 {
1078  switch (op->bytes) {
1079  case 1:
1080  op->val = *(u8 *)op->addr.reg;
1081  break;
1082  case 2:
1083  op->val = *(u16 *)op->addr.reg;
1084  break;
1085  case 4:
1086  op->val = *(u32 *)op->addr.reg;
1087  break;
1088  case 8:
1089  op->val = *(u64 *)op->addr.reg;
1090  break;
1091  }
1092 }
1093 
1094 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1095 {
1096  if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1097  return emulate_nm(ctxt);
1098 
1099  kvm_fpu_get();
1100  asm volatile("fninit");
1101  kvm_fpu_put();
1102  return X86EMUL_CONTINUE;
1103 }
1104 
1105 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1106 {
1107  u16 fcw;
1108 
1109  if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1110  return emulate_nm(ctxt);
1111 
1112  kvm_fpu_get();
1113  asm volatile("fnstcw %0": "+m"(fcw));
1114  kvm_fpu_put();
1115 
1116  ctxt->dst.val = fcw;
1117 
1118  return X86EMUL_CONTINUE;
1119 }
1120 
1121 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1122 {
1123  u16 fsw;
1124 
1125  if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1126  return emulate_nm(ctxt);
1127 
1128  kvm_fpu_get();
1129  asm volatile("fnstsw %0": "+m"(fsw));
1130  kvm_fpu_put();
1131 
1132  ctxt->dst.val = fsw;
1133 
1134  return X86EMUL_CONTINUE;
1135 }
1136 
1137 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1138  struct operand *op)
1139 {
1140  unsigned int reg;
1141 
1142  if (ctxt->d & ModRM)
1143  reg = ctxt->modrm_reg;
1144  else
1145  reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1146 
1147  if (ctxt->d & Sse) {
1148  op->type = OP_XMM;
1149  op->bytes = 16;
1150  op->addr.xmm = reg;
1151  kvm_read_sse_reg(reg, &op->vec_val);
1152  return;
1153  }
1154  if (ctxt->d & Mmx) {
1155  reg &= 7;
1156  op->type = OP_MM;
1157  op->bytes = 8;
1158  op->addr.mm = reg;
1159  return;
1160  }
1161 
1162  op->type = OP_REG;
1163  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1164  op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1165 
1166  fetch_register_operand(op);
1167  op->orig_val = op->val;
1168 }
1169 
1170 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1171 {
1172  if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1173  ctxt->modrm_seg = VCPU_SREG_SS;
1174 }
1175 
1176 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1177  struct operand *op)
1178 {
1179  u8 sib;
1180  int index_reg, base_reg, scale;
1181  int rc = X86EMUL_CONTINUE;
1182  ulong modrm_ea = 0;
1183 
1184  ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1185  index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1186  base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1187 
1188  ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1189  ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1190  ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1191  ctxt->modrm_seg = VCPU_SREG_DS;
1192 
1193  if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1194  op->type = OP_REG;
1195  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1196  op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1197  ctxt->d & ByteOp);
1198  if (ctxt->d & Sse) {
1199  op->type = OP_XMM;
1200  op->bytes = 16;
1201  op->addr.xmm = ctxt->modrm_rm;
1202  kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
1203  return rc;
1204  }
1205  if (ctxt->d & Mmx) {
1206  op->type = OP_MM;
1207  op->bytes = 8;
1208  op->addr.mm = ctxt->modrm_rm & 7;
1209  return rc;
1210  }
1211  fetch_register_operand(op);
1212  return rc;
1213  }
1214 
1215  op->type = OP_MEM;
1216 
1217  if (ctxt->ad_bytes == 2) {
1218  unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1219  unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1220  unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1221  unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1222 
1223  /* 16-bit ModR/M decode. */
1224  switch (ctxt->modrm_mod) {
1225  case 0:
1226  if (ctxt->modrm_rm == 6)
1227  modrm_ea += insn_fetch(u16, ctxt);
1228  break;
1229  case 1:
1230  modrm_ea += insn_fetch(s8, ctxt);
1231  break;
1232  case 2:
1233  modrm_ea += insn_fetch(u16, ctxt);
1234  break;
1235  }
1236  switch (ctxt->modrm_rm) {
1237  case 0:
1238  modrm_ea += bx + si;
1239  break;
1240  case 1:
1241  modrm_ea += bx + di;
1242  break;
1243  case 2:
1244  modrm_ea += bp + si;
1245  break;
1246  case 3:
1247  modrm_ea += bp + di;
1248  break;
1249  case 4:
1250  modrm_ea += si;
1251  break;
1252  case 5:
1253  modrm_ea += di;
1254  break;
1255  case 6:
1256  if (ctxt->modrm_mod != 0)
1257  modrm_ea += bp;
1258  break;
1259  case 7:
1260  modrm_ea += bx;
1261  break;
1262  }
1263  if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1264  (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1265  ctxt->modrm_seg = VCPU_SREG_SS;
1266  modrm_ea = (u16)modrm_ea;
1267  } else {
1268  /* 32/64-bit ModR/M decode. */
1269  if ((ctxt->modrm_rm & 7) == 4) {
1270  sib = insn_fetch(u8, ctxt);
1271  index_reg |= (sib >> 3) & 7;
1272  base_reg |= sib & 7;
1273  scale = sib >> 6;
1274 
1275  if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1276  modrm_ea += insn_fetch(s32, ctxt);
1277  else {
1278  modrm_ea += reg_read(ctxt, base_reg);
1279  adjust_modrm_seg(ctxt, base_reg);
1280  /* Increment ESP on POP [ESP] */
1281  if ((ctxt->d & IncSP) &&
1282  base_reg == VCPU_REGS_RSP)
1283  modrm_ea += ctxt->op_bytes;
1284  }
1285  if (index_reg != 4)
1286  modrm_ea += reg_read(ctxt, index_reg) << scale;
1287  } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1288  modrm_ea += insn_fetch(s32, ctxt);
1289  if (ctxt->mode == X86EMUL_MODE_PROT64)
1290  ctxt->rip_relative = 1;
1291  } else {
1292  base_reg = ctxt->modrm_rm;
1293  modrm_ea += reg_read(ctxt, base_reg);
1294  adjust_modrm_seg(ctxt, base_reg);
1295  }
1296  switch (ctxt->modrm_mod) {
1297  case 1:
1298  modrm_ea += insn_fetch(s8, ctxt);
1299  break;
1300  case 2:
1301  modrm_ea += insn_fetch(s32, ctxt);
1302  break;
1303  }
1304  }
1305  op->addr.mem.ea = modrm_ea;
1306  if (ctxt->ad_bytes != 8)
1307  ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1308 
1309 done:
1310  return rc;
1311 }
1312 
1313 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1314  struct operand *op)
1315 {
1316  int rc = X86EMUL_CONTINUE;
1317 
1318  op->type = OP_MEM;
1319  switch (ctxt->ad_bytes) {
1320  case 2:
1321  op->addr.mem.ea = insn_fetch(u16, ctxt);
1322  break;
1323  case 4:
1324  op->addr.mem.ea = insn_fetch(u32, ctxt);
1325  break;
1326  case 8:
1327  op->addr.mem.ea = insn_fetch(u64, ctxt);
1328  break;
1329  }
1330 done:
1331  return rc;
1332 }
1333 
1334 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1335 {
1336  long sv = 0, mask;
1337 
1338  if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1339  mask = ~((long)ctxt->dst.bytes * 8 - 1);
1340 
1341  if (ctxt->src.bytes == 2)
1342  sv = (s16)ctxt->src.val & (s16)mask;
1343  else if (ctxt->src.bytes == 4)
1344  sv = (s32)ctxt->src.val & (s32)mask;
1345  else
1346  sv = (s64)ctxt->src.val & (s64)mask;
1347 
1348  ctxt->dst.addr.mem.ea = address_mask(ctxt,
1349  ctxt->dst.addr.mem.ea + (sv >> 3));
1350  }
1351 
1352  /* only subword offset */
1353  ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1354 }
1355 
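Worked example with assumed values: a BTS on a dword memory operand with a register bit index of 100 gives mask = ~31 and sv = 96, so the effective address advances by 96 >> 3 = 12 bytes and the remaining in-word offset becomes 100 & 31 = 4, the same split of a large bit index into a byte offset plus bit number that hardware performs.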
1356 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1357  unsigned long addr, void *dest, unsigned size)
1358 {
1359  int rc;
1360  struct read_cache *mc = &ctxt->mem_read;
1361 
1362  if (mc->pos < mc->end)
1363  goto read_cached;
1364 
1365  if (KVM_EMULATOR_BUG_ON((mc->end + size) >= sizeof(mc->data), ctxt))
1366  return X86EMUL_UNHANDLEABLE;
1367 
1368  rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1369  &ctxt->exception);
1370  if (rc != X86EMUL_CONTINUE)
1371  return rc;
1372 
1373  mc->end += size;
1374 
1375 read_cached:
1376  memcpy(dest, mc->data + mc->pos, size);
1377  mc->pos += size;
1378  return X86EMUL_CONTINUE;
1379 }
1380 
1381 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1382  struct segmented_address addr,
1383  void *data,
1384  unsigned size)
1385 {
1386  int rc;
1387  ulong linear;
1388 
1389  rc = linearize(ctxt, addr, size, false, &linear);
1390  if (rc != X86EMUL_CONTINUE)
1391  return rc;
1392  return read_emulated(ctxt, linear, data, size);
1393 }
1394 
1395 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1396  struct segmented_address addr,
1397  const void *data,
1398  unsigned size)
1399 {
1400  int rc;
1401  ulong linear;
1402 
1403  rc = linearize(ctxt, addr, size, true, &linear);
1404  if (rc != X86EMUL_CONTINUE)
1405  return rc;
1406  return ctxt->ops->write_emulated(ctxt, linear, data, size,
1407  &ctxt->exception);
1408 }
1409 
1410 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1411  struct segmented_address addr,
1412  const void *orig_data, const void *data,
1413  unsigned size)
1414 {
1415  int rc;
1416  ulong linear;
1417 
1418  rc = linearize(ctxt, addr, size, true, &linear);
1419  if (rc != X86EMUL_CONTINUE)
1420  return rc;
1421  return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1422  size, &ctxt->exception);
1423 }
1424 
1425 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1426  unsigned int size, unsigned short port,
1427  void *dest)
1428 {
1429  struct read_cache *rc = &ctxt->io_read;
1430 
1431  if (rc->pos == rc->end) { /* refill pio read ahead */
1432  unsigned int in_page, n;
1433  unsigned int count = ctxt->rep_prefix ?
1434  address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1435  in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
1436  offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1437  PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1438  n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1439  if (n == 0)
1440  n = 1;
1441  rc->pos = rc->end = 0;
1442  if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1443  return 0;
1444  rc->end = n * size;
1445  }
1446 
1447  if (ctxt->rep_prefix && (ctxt->d & String) &&
1448  !(ctxt->eflags & X86_EFLAGS_DF)) {
1449  ctxt->dst.data = rc->data + rc->pos;
1450  ctxt->dst.type = OP_MEM_STR;
1451  ctxt->dst.count = (rc->end - rc->pos) / size;
1452  rc->pos = rc->end;
1453  } else {
1454  memcpy(dest, rc->data + rc->pos, size);
1455  rc->pos += size;
1456  }
1457  return 1;
1458 }
1459 
1460 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1461  u16 index, struct desc_struct *desc)
1462 {
1463  struct desc_ptr dt;
1464  ulong addr;
1465 
1466  ctxt->ops->get_idt(ctxt, &dt);
1467 
1468  if (dt.size < index * 8 + 7)
1469  return emulate_gp(ctxt, index << 3 | 0x2);
1470 
1471  addr = dt.address + index * 8;
1472  return linear_read_system(ctxt, addr, desc, sizeof(*desc));
1473 }
1474 
1475 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1476  u16 selector, struct desc_ptr *dt)
1477 {
1478  const struct x86_emulate_ops *ops = ctxt->ops;
1479  u32 base3 = 0;
1480 
1481  if (selector & 1 << 2) { /* TI bit set: selector references the LDT */
1482  struct desc_struct desc;
1483  u16 sel;
1484 
1485  memset(dt, 0, sizeof(*dt));
1486  if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1487  VCPU_SREG_LDTR))
1488  return;
1489 
1490  dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1491  dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1492  } else
1493  ops->get_gdt(ctxt, dt);
1494 }
1495 
1496 static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
1497  u16 selector, ulong *desc_addr_p)
1498 {
1499  struct desc_ptr dt;
1500  u16 index = selector >> 3;
1501  ulong addr;
1502 
1503  get_descriptor_table_ptr(ctxt, selector, &dt);
1504 
1505  if (dt.size < index * 8 + 7)
1506  return emulate_gp(ctxt, selector & 0xfffc);
1507 
1508  addr = dt.address + index * 8;
1509 
1510 #ifdef CONFIG_X86_64
1511  if (addr >> 32 != 0) {
1512  u64 efer = 0;
1513 
1514  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1515  if (!(efer & EFER_LMA))
1516  addr &= (u32)-1;
1517  }
1518 #endif
1519 
1520  *desc_addr_p = addr;
1521  return X86EMUL_CONTINUE;
1522 }
1523 
1524 /* allowed just for 8 bytes segments */
1525 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1526  u16 selector, struct desc_struct *desc,
1527  ulong *desc_addr_p)
1528 {
1529  int rc;
1530 
1531  rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
1532  if (rc != X86EMUL_CONTINUE)
1533  return rc;
1534 
1535  return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
1536 }
1537 
1538 /* allowed just for 8 bytes segments */
1539 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1540  u16 selector, struct desc_struct *desc)
1541 {
1542  int rc;
1543  ulong addr;
1544 
1545  rc = get_descriptor_ptr(ctxt, selector, &addr);
1546  if (rc != X86EMUL_CONTINUE)
1547  return rc;
1548 
1549  return linear_write_system(ctxt, addr, desc, sizeof(*desc));
1550 }
1551 
1552 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1553  u16 selector, int seg, u8 cpl,
1554  enum x86_transfer_type transfer,
1555  struct desc_struct *desc)
1556 {
1557  struct desc_struct seg_desc, old_desc;
1558  u8 dpl, rpl;
1559  unsigned err_vec = GP_VECTOR;
1560  u32 err_code = 0;
1561  bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1562  ulong desc_addr;
1563  int ret;
1564  u16 dummy;
1565  u32 base3 = 0;
1566 
1567  memset(&seg_desc, 0, sizeof(seg_desc));
1568 
1569  if (ctxt->mode == X86EMUL_MODE_REAL) {
1570  /* set real mode segment descriptor (keep limit etc. for
1571  * unreal mode) */
1572  ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1573  set_desc_base(&seg_desc, selector << 4);
1574  goto load;
1575  } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1576  /* VM86 needs a clean new segment descriptor */
1577  set_desc_base(&seg_desc, selector << 4);
1578  set_desc_limit(&seg_desc, 0xffff);
1579  seg_desc.type = 3;
1580  seg_desc.p = 1;
1581  seg_desc.s = 1;
1582  seg_desc.dpl = 3;
1583  goto load;
1584  }
1585 
1586  rpl = selector & 3;
1587 
1588  /* TR should be in GDT only */
1589  if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1590  goto exception;
1591 
1592  /* NULL selector is not valid for TR, CS and (except for long mode) SS */
1593  if (null_selector) {
1594  if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
1595  goto exception;
1596 
1597  if (seg == VCPU_SREG_SS) {
1598  if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
1599  goto exception;
1600 
1601  /*
1602  * ctxt->ops->set_segment expects the CPL to be in
1603  * SS.DPL, so fake an expand-up 32-bit data segment.
1604  */
1605  seg_desc.type = 3;
1606  seg_desc.p = 1;
1607  seg_desc.s = 1;
1608  seg_desc.dpl = cpl;
1609  seg_desc.d = 1;
1610  seg_desc.g = 1;
1611  }
1612 
1613  /* Skip all following checks */
1614  goto load;
1615  }
1616 
1617  ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1618  if (ret != X86EMUL_CONTINUE)
1619  return ret;
1620 
1621  err_code = selector & 0xfffc;
1622  err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
1623  GP_VECTOR;
1624 
1625  /* can't load system descriptor into segment selector */
1626  if (seg <= VCPU_SREG_GS && !seg_desc.s) {
1627  if (transfer == X86_TRANSFER_CALL_JMP)
1628  return X86EMUL_UNHANDLEABLE;
1629  goto exception;
1630  }
1631 
1632  dpl = seg_desc.dpl;
1633 
1634  switch (seg) {
1635  case VCPU_SREG_SS:
1636  /*
1637  * segment is not a writable data segment or segment
1638  * selector's RPL != CPL or DPL != CPL
1639  */
1640  if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1641  goto exception;
1642  break;
1643  case VCPU_SREG_CS:
1644  /*
1645  * KVM uses "none" when loading CS as part of emulating Real
1646  * Mode exceptions and IRET (handled above). In all other
1647  * cases, loading CS without a control transfer is a KVM bug.
1648  */
1649  if (WARN_ON_ONCE(transfer == X86_TRANSFER_NONE))
1650  goto exception;
1651 
1652  if (!(seg_desc.type & 8))
1653  goto exception;
1654 
1655  if (transfer == X86_TRANSFER_RET) {
1656  /* RET can never return to an inner privilege level. */
1657  if (rpl < cpl)
1658  goto exception;
1659  /* Outer-privilege level return is not implemented */
1660  if (rpl > cpl)
1661  return X86EMUL_UNHANDLEABLE;
1662  }
1663  if (transfer == X86_TRANSFER_RET || transfer == X86_TRANSFER_TASK_SWITCH) {
1664  if (seg_desc.type & 4) {
1665  /* conforming */
1666  if (dpl > rpl)
1667  goto exception;
1668  } else {
1669  /* nonconforming */
1670  if (dpl != rpl)
1671  goto exception;
1672  }
1673  } else { /* X86_TRANSFER_CALL_JMP */
1674  if (seg_desc.type & 4) {
1675  /* conforming */
1676  if (dpl > cpl)
1677  goto exception;
1678  } else {
1679  /* nonconforming */
1680  if (rpl > cpl || dpl != cpl)
1681  goto exception;
1682  }
1683  }
1684  /* in long-mode d/b must be clear if l is set */
1685  if (seg_desc.d && seg_desc.l) {
1686  u64 efer = 0;
1687 
1688  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1689  if (efer & EFER_LMA)
1690  goto exception;
1691  }
1692 
1693  /* CS(RPL) <- CPL */
1694  selector = (selector & 0xfffc) | cpl;
1695  break;
1696  case VCPU_SREG_TR:
1697  if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1698  goto exception;
1699  break;
1700  case VCPU_SREG_LDTR:
1701  if (seg_desc.s || seg_desc.type != 2)
1702  goto exception;
1703  break;
1704  default: /* DS, ES, FS, or GS */
1705  /*
1706  * segment is not a data or readable code segment or
1707  * ((segment is a data or nonconforming code segment)
1708  * and ((RPL > DPL) or (CPL > DPL)))
1709  */
1710  if ((seg_desc.type & 0xa) == 0x8 ||
1711  (((seg_desc.type & 0xc) != 0xc) &&
1712  (rpl > dpl || cpl > dpl)))
1713  goto exception;
1714  break;
1715  }
1716 
1717  if (!seg_desc.p) {
1718  err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1719  goto exception;
1720  }
1721 
1722  if (seg_desc.s) {
1723  /* mark segment as accessed */
1724  if (!(seg_desc.type & 1)) {
1725  seg_desc.type |= 1;
1726  ret = write_segment_descriptor(ctxt, selector,
1727  &seg_desc);
1728  if (ret != X86EMUL_CONTINUE)
1729  return ret;
1730  }
1731  } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1732  ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
1733  if (ret != X86EMUL_CONTINUE)
1734  return ret;
1735  if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
1736  ((u64)base3 << 32), ctxt))
1737  return emulate_gp(ctxt, err_code);
1738  }
1739 
1740  if (seg == VCPU_SREG_TR) {
1741  old_desc = seg_desc;
1742  seg_desc.type |= 2; /* busy */
1743  ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1744  sizeof(seg_desc), &ctxt->exception);
1745  if (ret != X86EMUL_CONTINUE)
1746  return ret;
1747  }
1748 load:
1749  ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1750  if (desc)
1751  *desc = seg_desc;
1752  return X86EMUL_CONTINUE;
1753 exception:
1754  return emulate_exception(ctxt, err_vec, err_code, true);
1755 }
1756 
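Tracing the SS arm above with concrete values: at CPL 3 only a selector with RPL 3 that names a writable data segment (type & 0xa == 0x2) with DPL 3 passes; any mismatch raises #GP, or #TS when the load is part of a task switch, carrying the selector index in the error code.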
1757 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1758  u16 selector, int seg)
1759 {
1760  u8 cpl = ctxt->ops->cpl(ctxt);
1761 
1762  /*
1763  * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
1764  * they can load it at CPL<3 (Intel's manual says only LSS can,
1765  * but it's wrong).
1766  *
1767  * However, the Intel manual says that putting IST=1/DPL=3 in
1768  * an interrupt gate will result in SS=3 (the AMD manual instead
1769  * says it doesn't), so allow SS=3 in __load_segment_descriptor
1770  * and only forbid it here.
1771  */
1772  if (seg == VCPU_SREG_SS && selector == 3 &&
1773  ctxt->mode == X86EMUL_MODE_PROT64)
1774  return emulate_exception(ctxt, GP_VECTOR, 0, true);
1775 
1776  return __load_segment_descriptor(ctxt, selector, seg, cpl,
1777  X86_TRANSFER_NONE, NULL);
1778 }
1779 
1780 static void write_register_operand(struct operand *op)
1781 {
1782  return assign_register(op->addr.reg, op->val, op->bytes);
1783 }
1784 
1785 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1786 {
1787  switch (op->type) {
1788  case OP_REG:
1789  write_register_operand(op);
1790  break;
1791  case OP_MEM:
1792  if (ctxt->lock_prefix)
1793  return segmented_cmpxchg(ctxt,
1794  op->addr.mem,
1795  &op->orig_val,
1796  &op->val,
1797  op->bytes);
1798  else
1799  return segmented_write(ctxt,
1800  op->addr.mem,
1801  &op->val,
1802  op->bytes);
1803  case OP_MEM_STR:
1804  return segmented_write(ctxt,
1805  op->addr.mem,
1806  op->data,
1807  op->bytes * op->count);
1808  case OP_XMM:
1809  kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
1810  break;
1811  case OP_MM:
1812  kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
1813  break;
1814  case OP_NONE:
1815  /* no writeback */
1816  break;
1817  default:
1818  break;
1819  }
1820  return X86EMUL_CONTINUE;
1821 }
1822 
1823 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1824 {
1825  struct segmented_address addr;
1826 
1827  rsp_increment(ctxt, -bytes);
1828  addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1829  addr.seg = VCPU_SREG_SS;
1830 
1831  return segmented_write(ctxt, addr, data, bytes);
1832 }
1833 
1834 static int em_push(struct x86_emulate_ctxt *ctxt)
1835 {
1836  /* Disable writeback. */
1837  ctxt->dst.type = OP_NONE;
1838  return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1839 }
1840 
1841 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1842  void *dest, int len)
1843 {
1844  int rc;
1845  struct segmented_address addr;
1846 
1847  addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1848  addr.seg = VCPU_SREG_SS;
1849  rc = segmented_read(ctxt, addr, dest, len);
1850  if (rc != X86EMUL_CONTINUE)
1851  return rc;
1852 
1853  rsp_increment(ctxt, len);
1854  return rc;
1855 }
1856 
1857 static int em_pop(struct x86_emulate_ctxt *ctxt)
1858 {
1859  return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1860 }
1861 
1862 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1863  void *dest, int len)
1864 {
1865  int rc;
1866  unsigned long val, change_mask;
1867  int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
1868  int cpl = ctxt->ops->cpl(ctxt);
1869 
1870  rc = emulate_pop(ctxt, &val, len);
1871  if (rc != X86EMUL_CONTINUE)
1872  return rc;
1873 
1874  change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
1875  X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
1876  X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
1877  X86_EFLAGS_AC | X86_EFLAGS_ID;
1878 
1879  switch(ctxt->mode) {
1880  case X86EMUL_MODE_PROT64:
1881  case X86EMUL_MODE_PROT32:
1882  case X86EMUL_MODE_PROT16:
1883  if (cpl == 0)
1884  change_mask |= X86_EFLAGS_IOPL;
1885  if (cpl <= iopl)
1886  change_mask |= X86_EFLAGS_IF;
1887  break;
1888  case X86EMUL_MODE_VM86:
1889  if (iopl < 3)
1890  return emulate_gp(ctxt, 0);
1891  change_mask |= X86_EFLAGS_IF;
1892  break;
1893  default: /* real mode */
1894  change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
1895  break;
1896  }
1897 
1898  *(unsigned long *)dest =
1899  (ctxt->eflags & ~change_mask) | (val & change_mask);
1900 
1901  return rc;
1902 }
1903 
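The mask arithmetic encodes the architectural POPF rules: IOPL is writable only at CPL 0 and IF only when CPL <= IOPL, so for example a POPF at CPL 3 with IOPL 0 completes without faulting but silently preserves IF and IOPL; only the VM86 case with IOPL < 3 faults.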
1904 static int em_popf(struct x86_emulate_ctxt *ctxt)
1905 {
1906  ctxt->dst.type = OP_REG;
1907  ctxt->dst.addr.reg = &ctxt->eflags;
1908  ctxt->dst.bytes = ctxt->op_bytes;
1909  return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1910 }
1911 
1912 static int em_enter(struct x86_emulate_ctxt *ctxt)
1913 {
1914  int rc;
1915  unsigned frame_size = ctxt->src.val;
1916  unsigned nesting_level = ctxt->src2.val & 31;
1917  ulong rbp;
1918 
1919  if (nesting_level)
1920  return X86EMUL_UNHANDLEABLE;
1921 
1922  rbp = reg_read(ctxt, VCPU_REGS_RBP);
1923  rc = push(ctxt, &rbp, stack_size(ctxt));
1924  if (rc != X86EMUL_CONTINUE)
1925  return rc;
1926  assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1927  stack_mask(ctxt));
1928  assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1929  reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1930  stack_mask(ctxt));
1931  return X86EMUL_CONTINUE;
1932 }
1933 
1934 static int em_leave(struct x86_emulate_ctxt *ctxt)
1935 {
1936  assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1937  stack_mask(ctxt));
1938  return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1939 }
1940 
1941 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1942 {
1943  int seg = ctxt->src2.val;
1944 
1945  ctxt->src.val = get_segment_selector(ctxt, seg);
1946  if (ctxt->op_bytes == 4) {
1947  rsp_increment(ctxt, -2);
1948  ctxt->op_bytes = 2;
1949  }
1950 
1951  return em_push(ctxt);
1952 }
1953 
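Note the dword quirk handled above: pushing a segment register with a 32-bit operand size moves the stack pointer by 4 but stores only the 16-bit selector, so the code pre-decrements RSP by 2 and then does a 2-byte push, leaving the slot's upper word untouched; em_pop_sreg below mirrors this with its op_bytes - 2 adjustment.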
1954 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1955 {
1956  int seg = ctxt->src2.val;
1957  unsigned long selector;
1958  int rc;
1959 
1960  rc = emulate_pop(ctxt, &selector, 2);
1961  if (rc != X86EMUL_CONTINUE)
1962  return rc;
1963 
1964  if (seg == VCPU_SREG_SS)
1965  ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1966  if (ctxt->op_bytes > 2)
1967  rsp_increment(ctxt, ctxt->op_bytes - 2);
1968 
1969  rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1970  return rc;
1971 }
1972 
1973 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1974 {
1975  unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1976  int rc = X86EMUL_CONTINUE;
1977  int reg = VCPU_REGS_RAX;
1978 
1979  while (reg <= VCPU_REGS_RDI) {
1980  (reg == VCPU_REGS_RSP) ?
1981  (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1982 
1983  rc = em_push(ctxt);
1984  if (rc != X86EMUL_CONTINUE)
1985  return rc;
1986 
1987  ++reg;
1988  }
1989 
1990  return rc;
1991 }
1992 
1993 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1994 {
1995  ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
1996  return em_push(ctxt);
1997 }
1998 
1999 static int em_popa(struct x86_emulate_ctxt *ctxt)
2000 {
2001  int rc = X86EMUL_CONTINUE;
2002  int reg = VCPU_REGS_RDI;
2003  u32 val;
2004 
2005  while (reg >= VCPU_REGS_RAX) {
2006  if (reg == VCPU_REGS_RSP) {
2007  rsp_increment(ctxt, ctxt->op_bytes);
2008  --reg;
2009  }
2010 
2011  rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
2012  if (rc != X86EMUL_CONTINUE)
2013  break;
2014  assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
2015  --reg;
2016  }
2017  return rc;
2018 }
2019 
2020 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2021 {
2022  const struct x86_emulate_ops *ops = ctxt->ops;
2023  int rc;
2024  struct desc_ptr dt;
2025  gva_t cs_addr;
2026  gva_t eip_addr;
2027  u16 cs, eip;
2028 
2029  /* TODO: Add limit checks */
2030  ctxt->src.val = ctxt->eflags;
2031  rc = em_push(ctxt);
2032  if (rc != X86EMUL_CONTINUE)
2033  return rc;
2034 
2035  ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);
2036 
2037  ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
2038  rc = em_push(ctxt);
2039  if (rc != X86EMUL_CONTINUE)
2040  return rc;
2041 
2042  ctxt->src.val = ctxt->_eip;
2043  rc = em_push(ctxt);
2044  if (rc != X86EMUL_CONTINUE)
2045  return rc;
2046 
2047  ops->get_idt(ctxt, &dt);
2048 
2049  eip_addr = dt.address + (irq << 2);
2050  cs_addr = dt.address + (irq << 2) + 2;
2051 
2052  rc = linear_read_system(ctxt, cs_addr, &cs, 2);
2053  if (rc != X86EMUL_CONTINUE)
2054  return rc;
2055 
2056  rc = linear_read_system(ctxt, eip_addr, &eip, 2);
2057  if (rc != X86EMUL_CONTINUE)
2058  return rc;
2059 
2060  rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
2061  if (rc != X86EMUL_CONTINUE)
2062  return rc;
2063 
2064  ctxt->_eip = eip;
2065 
2066  return rc;
2067 }
2068 
2069 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
2070 {
2071  int rc;
2072 
2073  invalidate_registers(ctxt);
2074  rc = __emulate_int_real(ctxt, irq);
2075  if (rc == X86EMUL_CONTINUE)
2076  writeback_registers(ctxt);
2077  return rc;
2078 }
2079 
2080 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
2081 {
2082  switch(ctxt->mode) {
2083  case X86EMUL_MODE_REAL:
2084  return __emulate_int_real(ctxt, irq);
2085  case X86EMUL_MODE_VM86:
2086  case X86EMUL_MODE_PROT16:
2087  case X86EMUL_MODE_PROT32:
2088  case X86EMUL_MODE_PROT64:
2089  default:
2090  /* Protected mode interrupts are not implemented yet. */
2091  return X86EMUL_UNHANDLEABLE;
2092  }
2093 }
2094 
2095 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
2096 {
2097  int rc = X86EMUL_CONTINUE;
2098  unsigned long temp_eip = 0;
2099  unsigned long temp_eflags = 0;
2100  unsigned long cs = 0;
2101  unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
2102  X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
2103  X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
2104  X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
2105  X86_EFLAGS_AC | X86_EFLAGS_ID |
2106  X86_EFLAGS_FIXED;
2107  unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
2108  X86_EFLAGS_VIP;
2109 
2110  /* TODO: Add stack limit check */
2111 
2112  rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
2113 
2114  if (rc != X86EMUL_CONTINUE)
2115  return rc;
2116 
2117  if (temp_eip & ~0xffff)
2118  return emulate_gp(ctxt, 0);
2119 
2120  rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2121 
2122  if (rc != X86EMUL_CONTINUE)
2123  return rc;
2124 
2125  rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
2126 
2127  if (rc != X86EMUL_CONTINUE)
2128  return rc;
2129 
2130  rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
2131 
2132  if (rc != X86EMUL_CONTINUE)
2133  return rc;
2134 
2135  ctxt->_eip = temp_eip;
2136 
2137  if (ctxt->op_bytes == 4)
2138  ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
2139  else if (ctxt->op_bytes == 2) {
2140  ctxt->eflags &= ~0xffff;
2141  ctxt->eflags |= temp_eflags;
2142  }
2143 
2144  ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
2145  ctxt->eflags |= X86_EFLAGS_FIXED;
2146  ctxt->ops->set_nmi_mask(ctxt, false);
2147 
2148  return rc;
2149 }
2150 
2151 static int em_iret(struct x86_emulate_ctxt *ctxt)
2152 {
2153  switch(ctxt->mode) {
2154  case X86EMUL_MODE_REAL:
2155  return emulate_iret_real(ctxt);
2156  case X86EMUL_MODE_VM86:
2157  case X86EMUL_MODE_PROT16:
2158  case X86EMUL_MODE_PROT32:
2159  case X86EMUL_MODE_PROT64:
2160  default:
2161  /* IRET from protected mode is not implemented yet. */
2162  return X86EMUL_UNHANDLEABLE;
2163  }
2164 }
2165 
2166 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2167 {
2168  int rc;
2169  unsigned short sel;
2170  struct desc_struct new_desc;
2171  u8 cpl = ctxt->ops->cpl(ctxt);
2172 
2173  memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2174 
2175  rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
2176  X86_TRANSFER_CALL_JMP,
2177  &new_desc);
2178  if (rc != X86EMUL_CONTINUE)
2179  return rc;
2180 
2181  rc = assign_eip_far(ctxt, ctxt->src.val);
2182  /* Error handling is not implemented. */
2183  if (rc != X86EMUL_CONTINUE)
2184  return X86EMUL_UNHANDLEABLE;
2185 
2186  return rc;
2187 }
2188 
2189 static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
2190 {
2191  return assign_eip_near(ctxt, ctxt->src.val);
2192 }
2193 
2194 static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
2195 {
2196  int rc;
2197  long int old_eip;
2198 
2199  old_eip = ctxt->_eip;
2200  rc = assign_eip_near(ctxt, ctxt->src.val);
2201  if (rc != X86EMUL_CONTINUE)
2202  return rc;
2203  ctxt->src.val = old_eip;
2204  rc = em_push(ctxt);
2205  return rc;
2206 }
2207 
2208 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2209 {
2210  u64 old = ctxt->dst.orig_val64;
2211 
2212  if (ctxt->dst.bytes == 16)
2213  return X86EMUL_UNHANDLEABLE;
2214 
2215  if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2216  ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2217  *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2218  *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2219  ctxt->eflags &= ~X86_EFLAGS_ZF;
2220  } else {
2221  ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2222  (u32) reg_read(ctxt, VCPU_REGS_RBX);
2223 
2224  ctxt->eflags |= X86_EFLAGS_ZF;
2225  }
2226  return X86EMUL_CONTINUE;
2227 }
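
/*
 * The CMPXCHG8B semantics implemented above, as a standalone sketch
 * (illustrative only): compare EDX:EAX with the 64-bit destination; on a
 * match, store ECX:EBX there and set ZF, otherwise load the destination
 * into EDX:EAX and clear ZF.
 */
static inline bool cmpxchg8b_sketch(u64 *mem, u32 *eax, u32 *edx,
				    u32 ebx, u32 ecx)
{
	u64 expected = ((u64)*edx << 32) | *eax;

	if (*mem == expected) {
		*mem = ((u64)ecx << 32) | ebx;	/* ZF will be set */
		return true;
	}
	*eax = (u32)*mem;			/* ZF will be cleared */
	*edx = (u32)(*mem >> 32);
	return false;
}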
2228 
2229 static int em_ret(struct x86_emulate_ctxt *ctxt)
2230 {
2231  int rc;
2232  unsigned long eip;
2233 
2234  rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2235  if (rc != X86EMUL_CONTINUE)
2236  return rc;
2237 
2238  return assign_eip_near(ctxt, eip);
2239 }
2240 
2241 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2242 {
2243  int rc;
2244  unsigned long eip, cs;
2245  int cpl = ctxt->ops->cpl(ctxt);
2246  struct desc_struct new_desc;
2247 
2248  rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2249  if (rc != X86EMUL_CONTINUE)
2250  return rc;
2251  rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2252  if (rc != X86EMUL_CONTINUE)
2253  return rc;
2254  rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
2255  X86_TRANSFER_RET,
2256  &new_desc);
2257  if (rc != X86EMUL_CONTINUE)
2258  return rc;
2259  rc = assign_eip_far(ctxt, eip);
2260  /* Error handling is not implemented. */
2261  if (rc != X86EMUL_CONTINUE)
2262  return X86EMUL_UNHANDLEABLE;
2263 
2264  return rc;
2265 }
2266 
2267 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2268 {
2269  int rc;
2270 
2271  rc = em_ret_far(ctxt);
2272  if (rc != X86EMUL_CONTINUE)
2273  return rc;
2274  rsp_increment(ctxt, ctxt->src.val);
2275  return X86EMUL_CONTINUE;
2276 }
2277 
2278 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2279 {
2280  /* Save real source value, then compare EAX against destination. */
2281  ctxt->dst.orig_val = ctxt->dst.val;
2282  ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2283  ctxt->src.orig_val = ctxt->src.val;
2284  ctxt->src.val = ctxt->dst.orig_val;
2285  fastop(ctxt, em_cmp);
2286 
2287  if (ctxt->eflags & X86_EFLAGS_ZF) {
2288  /* Success: write back to memory; no update of EAX */
2289  ctxt->src.type = OP_NONE;
2290  ctxt->dst.val = ctxt->src.orig_val;
2291  } else {
2292  /* Failure: write the value we saw to EAX. */
2293  ctxt->src.type = OP_REG;
2294  ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2295  ctxt->src.val = ctxt->dst.orig_val;
2296  /* Create write-cycle to dest by writing the same value */
2297  ctxt->dst.val = ctxt->dst.orig_val;
2298  }
2299  return X86EMUL_CONTINUE;
2300 }
2301 
2302 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2303 {
2304  int seg = ctxt->src2.val;
2305  unsigned short sel;
2306  int rc;
2307 
2308  memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2309 
2310  rc = load_segment_descriptor(ctxt, sel, seg);
2311  if (rc != X86EMUL_CONTINUE)
2312  return rc;
2313 
2314  ctxt->dst.val = ctxt->src.val;
2315  return rc;
2316 }
2317 
2318 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2319 {
2320  if (!ctxt->ops->is_smm(ctxt))
2321  return emulate_ud(ctxt);
2322 
2323  if (ctxt->ops->leave_smm(ctxt))
2324  ctxt->ops->triple_fault(ctxt);
2325 
2326  return emulator_recalc_and_set_mode(ctxt);
2327 }
2328 
2329 static void
2330 setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
2331 {
2332  cs->l = 0; /* will be adjusted later */
2333  set_desc_base(cs, 0); /* flat segment */
2334  cs->g = 1; /* 4kb granularity */
2335  set_desc_limit(cs, 0xfffff); /* 4GB limit */
2336  cs->type = 0x0b; /* Read, Execute, Accessed */
2337  cs->s = 1;
2338  cs->dpl = 0; /* will be adjusted later */
2339  cs->p = 1;
2340  cs->d = 1;
2341  cs->avl = 0;
2342 
2343  set_desc_base(ss, 0); /* flat segment */
2344  set_desc_limit(ss, 0xfffff); /* 4GB limit */
2345  ss->g = 1; /* 4kb granularity */
2346  ss->s = 1;
2347  ss->type = 0x03; /* Read/Write, Accessed */
2348  ss->d = 1; /* 32bit stack segment */
2349  ss->dpl = 0;
2350  ss->p = 1;
2351  ss->l = 0;
2352  ss->avl = 0;
2353 }
2354 
2355 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2356 {
2357  u32 eax, ebx, ecx, edx;
2358 
2359  eax = ecx = 0;
2360  ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2361  return is_guest_vendor_intel(ebx, ecx, edx);
2362 }
2363 
2364 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2365 {
2366  const struct x86_emulate_ops *ops = ctxt->ops;
2367  u32 eax, ebx, ecx, edx;
2368 
2369  /*
2370  * SYSCALL is always enabled in long mode, so the check only becomes
2371  * vendor specific (via CPUID) when other modes are active.
2372  */
2373  if (ctxt->mode == X86EMUL_MODE_PROT64)
2374  return true;
2375 
2376  eax = 0x00000000;
2377  ecx = 0x00000000;
2378  ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2379  /*
2380  * Remark: Intel CPUs only support "syscall" in 64-bit long mode, so a
2381  * 64-bit guest running a 32-bit compat app will #UD. That behaviour
2382  * could be emulated to match AMD's response, but AMD CPUs cannot be
2383  * made to behave like Intel's, so each vendor keeps its own semantics.
2384  */
2385  if (is_guest_vendor_intel(ebx, ecx, edx))
2386  return false;
2387 
2388  if (is_guest_vendor_amd(ebx, ecx, edx) ||
2389  is_guest_vendor_hygon(ebx, ecx, edx))
2390  return true;
2391 
2392  /*
2393  * default: (not Intel, not AMD, not Hygon), apply Intel's
2394  * stricter rules...
2395  */
2396  return false;
2397 }
2398 
2399 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2400 {
2401  const struct x86_emulate_ops *ops = ctxt->ops;
2402  struct desc_struct cs, ss;
2403  u64 msr_data;
2404  u16 cs_sel, ss_sel;
2405  u64 efer = 0;
2406 
2407  /* syscall is not available in real mode */
2408  if (ctxt->mode == X86EMUL_MODE_REAL ||
2409  ctxt->mode == X86EMUL_MODE_VM86)
2410  return emulate_ud(ctxt);
2411 
2412  if (!(em_syscall_is_enabled(ctxt)))
2413  return emulate_ud(ctxt);
2414 
2415  ops->get_msr(ctxt, MSR_EFER, &efer);
2416  if (!(efer & EFER_SCE))
2417  return emulate_ud(ctxt);
2418 
2419  setup_syscalls_segments(&cs, &ss);
2420  ops->get_msr(ctxt, MSR_STAR, &msr_data);
2421  msr_data >>= 32;
2422  cs_sel = (u16)(msr_data & 0xfffc);
2423  ss_sel = (u16)(msr_data + 8);
2424 
2425  if (efer & EFER_LMA) {
2426  cs.d = 0;
2427  cs.l = 1;
2428  }
2429  ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2430  ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2431 
2432  *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2433  if (efer & EFER_LMA) {
2434 #ifdef CONFIG_X86_64
2435  *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2436 
2437  ops->get_msr(ctxt,
2438  ctxt->mode == X86EMUL_MODE_PROT64 ?
2439  MSR_LSTAR : MSR_CSTAR, &msr_data);
2440  ctxt->_eip = msr_data;
2441 
2442  ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2443  ctxt->eflags &= ~msr_data;
2444  ctxt->eflags |= X86_EFLAGS_FIXED;
2445 #endif
2446  } else {
2447  /* legacy mode */
2448  ops->get_msr(ctxt, MSR_STAR, &msr_data);
2449  ctxt->_eip = (u32)msr_data;
2450 
2451  ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2452  }
2453 
2454  ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2455  return X86EMUL_CONTINUE;
2456 }
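
/*
 * The MSR_STAR layout relied on above: bits 47:32 hold the SYSCALL CS
 * selector (SS is implicitly CS + 8) and bits 63:48 the SYSRET base.
 * After "msr_data >>= 32", masking with 0xfffc forces RPL 0; e.g.
 * STAR = 0x0023001000000000 yields CS = 0x0010 and SS = 0x0018.
 */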
2457 
2458 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2459 {
2460  const struct x86_emulate_ops *ops = ctxt->ops;
2461  struct desc_struct cs, ss;
2462  u64 msr_data;
2463  u16 cs_sel, ss_sel;
2464  u64 efer = 0;
2465 
2466  ops->get_msr(ctxt, MSR_EFER, &efer);
2467  /* inject #GP if in real mode */
2468  if (ctxt->mode == X86EMUL_MODE_REAL)
2469  return emulate_gp(ctxt, 0);
2470 
2471  /*
2472  * Not recognized on AMD in compat mode (but is recognized in legacy
2473  * mode).
2474  */
2475  if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2476  && !vendor_intel(ctxt))
2477  return emulate_ud(ctxt);
2478 
2479  /* sysenter/sysexit have not been tested in 64bit mode. */
2480  if (ctxt->mode == X86EMUL_MODE_PROT64)
2481  return X86EMUL_UNHANDLEABLE;
2482 
2483  ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2484  if ((msr_data & 0xfffc) == 0x0)
2485  return emulate_gp(ctxt, 0);
2486 
2487  setup_syscalls_segments(&cs, &ss);
2488  ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2489  cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2490  ss_sel = cs_sel + 8;
2491  if (efer & EFER_LMA) {
2492  cs.d = 0;
2493  cs.l = 1;
2494  }
2495 
2496  ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2497  ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2498 
2499  ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2500  ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2501 
2502  ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2503  *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2504  (u32)msr_data;
2505  if (efer & EFER_LMA)
2506  ctxt->mode = X86EMUL_MODE_PROT64;
2507 
2508  return X86EMUL_CONTINUE;
2509 }
2510 
2511 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2512 {
2513  const struct x86_emulate_ops *ops = ctxt->ops;
2514  struct desc_struct cs, ss;
2515  u64 msr_data, rcx, rdx;
2516  int usermode;
2517  u16 cs_sel = 0, ss_sel = 0;
2518 
2519  /* inject #GP if in real mode or Virtual 8086 mode */
2520  if (ctxt->mode == X86EMUL_MODE_REAL ||
2521  ctxt->mode == X86EMUL_MODE_VM86)
2522  return emulate_gp(ctxt, 0);
2523 
2524  setup_syscalls_segments(&cs, &ss);
2525 
2526  if ((ctxt->rex_prefix & 0x8) != 0x0)
2527  usermode = X86EMUL_MODE_PROT64;
2528  else
2529  usermode = X86EMUL_MODE_PROT32;
2530 
2531  rcx = reg_read(ctxt, VCPU_REGS_RCX);
2532  rdx = reg_read(ctxt, VCPU_REGS_RDX);
2533 
2534  cs.dpl = 3;
2535  ss.dpl = 3;
2536  ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2537  switch (usermode) {
2538  case X86EMUL_MODE_PROT32:
2539  cs_sel = (u16)(msr_data + 16);
2540  if ((msr_data & 0xfffc) == 0x0)
2541  return emulate_gp(ctxt, 0);
2542  ss_sel = (u16)(msr_data + 24);
2543  rcx = (u32)rcx;
2544  rdx = (u32)rdx;
2545  break;
2546  case X86EMUL_MODE_PROT64:
2547  cs_sel = (u16)(msr_data + 32);
2548  if (msr_data == 0x0)
2549  return emulate_gp(ctxt, 0);
2550  ss_sel = cs_sel + 8;
2551  cs.d = 0;
2552  cs.l = 1;
2553  if (emul_is_noncanonical_address(rcx, ctxt) ||
2554  emul_is_noncanonical_address(rdx, ctxt))
2555  return emulate_gp(ctxt, 0);
2556  break;
2557  }
2558  cs_sel |= SEGMENT_RPL_MASK;
2559  ss_sel |= SEGMENT_RPL_MASK;
2560 
2561  ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2562  ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2563 
2564  ctxt->_eip = rdx;
2565  ctxt->mode = usermode;
2566  *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2567 
2568  return X86EMUL_CONTINUE;
2569 }
2570 
2571 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2572 {
2573  int iopl;
2574  if (ctxt->mode == X86EMUL_MODE_REAL)
2575  return false;
2576  if (ctxt->mode == X86EMUL_MODE_VM86)
2577  return true;
2578  iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2579  return ctxt->ops->cpl(ctxt) > iopl;
2580 }
2581 
2582 #define VMWARE_PORT_VMPORT (0x5658)
2583 #define VMWARE_PORT_VMRPC (0x5659)
2584 
2585 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2586  u16 port, u16 len)
2587 {
2588  const struct x86_emulate_ops *ops = ctxt->ops;
2589  struct desc_struct tr_seg;
2590  u32 base3;
2591  int r;
2592  u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2593  unsigned mask = (1 << len) - 1;
2594  unsigned long base;
2595 
2596  /*
2597  * VMware allows access to these ports even if denied by the TSS
2598  * I/O permission bitmap. Mimic that behavior.
2599  */
2600  if (enable_vmware_backdoor &&
2601  ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2602  return true;
2603 
2604  ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2605  if (!tr_seg.p)
2606  return false;
2607  if (desc_limit_scaled(&tr_seg) < 103)
2608  return false;
2609  base = get_desc_base(&tr_seg);
2610 #ifdef CONFIG_X86_64
2611  base |= ((u64)base3) << 32;
2612 #endif
2613  r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2614  if (r != X86EMUL_CONTINUE)
2615  return false;
2616  if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2617  return false;
2618  r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2619  if (r != X86EMUL_CONTINUE)
2620  return false;
2621  if ((perm >> bit_idx) & mask)
2622  return false;
2623  return true;
2624 }
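
/*
 * The TSS I/O bitmap arithmetic above, worked through: one bit per port,
 * so port N lives at bit (N & 7) of byte N / 8 past the bitmap base found
 * at TSS offset 102. Two bytes are read so that an access spanning a byte
 * boundary (len up to 4) is still covered by the (1 << len) - 1 mask.
 * A minimal sketch of the same check, assuming the relevant bitmap bytes
 * have already been copied into a local buffer:
 */
static inline bool io_bitmap_allows(const u8 *bitmap, u16 port, u16 len)
{
	u16 perm;

	memcpy(&perm, bitmap + port / 8, 2);	/* assumes little endian */
	return !((perm >> (port & 0x7)) & ((1 << len) - 1));
}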
2625 
2626 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2627  u16 port, u16 len)
2628 {
2629  if (ctxt->perm_ok)
2630  return true;
2631 
2632  if (emulator_bad_iopl(ctxt))
2633  if (!emulator_io_port_access_allowed(ctxt, port, len))
2634  return false;
2635 
2636  ctxt->perm_ok = true;
2637 
2638  return true;
2639 }
2640 
2641 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2642 {
2643  /*
2644  * Intel CPUs mask the counter and pointers in a rather strange
2645  * manner when ECX is zero, due to REP-string optimizations.
2646  */
2647 #ifdef CONFIG_X86_64
2648  if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2649  return;
2650 
2651  *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2652 
2653  switch (ctxt->b) {
2654  case 0xa4: /* movsb */
2655  case 0xa5: /* movsd/w */
2656  *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2657  fallthrough;
2658  case 0xaa: /* stosb */
2659  case 0xab: /* stosd/w */
2660  *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2661  }
2662 #endif
2663 }
2664 
2665 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2666  struct tss_segment_16 *tss)
2667 {
2668  tss->ip = ctxt->_eip;
2669  tss->flag = ctxt->eflags;
2670  tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2671  tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2672  tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2673  tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2674  tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2675  tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2676  tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2677  tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2678 
2679  tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2680  tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2681  tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2682  tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2683  tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2684 }
2685 
2686 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2687  struct tss_segment_16 *tss)
2688 {
2689  int ret;
2690  u8 cpl;
2691 
2692  ctxt->_eip = tss->ip;
2693  ctxt->eflags = tss->flag | 2;
2694  *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2695  *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2696  *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2697  *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2698  *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2699  *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2700  *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2701  *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2702 
2703  /*
2704  * SDM says that segment selectors are loaded before segment
2705  * descriptors
2706  */
2707  set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2708  set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2709  set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2710  set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2711  set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2712 
2713  cpl = tss->cs & 3;
2714 
2715  /*
2716  * Now load the segment descriptors. If a fault happens at this
2717  * stage, it is handled in the context of the new task.
2718  */
2719  ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2720  X86_TRANSFER_TASK_SWITCH, NULL);
2721  if (ret != X86EMUL_CONTINUE)
2722  return ret;
2723  ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2724  X86_TRANSFER_TASK_SWITCH, NULL);
2725  if (ret != X86EMUL_CONTINUE)
2726  return ret;
2727  ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2728  X86_TRANSFER_TASK_SWITCH, NULL);
2729  if (ret != X86EMUL_CONTINUE)
2730  return ret;
2731  ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2732  X86_TRANSFER_TASK_SWITCH, NULL);
2733  if (ret != X86EMUL_CONTINUE)
2734  return ret;
2735  ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2736  X86_TRANSFER_TASK_SWITCH, NULL);
2737  if (ret != X86EMUL_CONTINUE)
2738  return ret;
2739 
2740  return X86EMUL_CONTINUE;
2741 }
2742 
2743 static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2744  ulong old_tss_base, struct desc_struct *new_desc)
2745 {
2746  struct tss_segment_16 tss_seg;
2747  int ret;
2748  u32 new_tss_base = get_desc_base(new_desc);
2749 
2750  ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2751  if (ret != X86EMUL_CONTINUE)
2752  return ret;
2753 
2754  save_state_to_tss16(ctxt, &tss_seg);
2755 
2756  ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2757  if (ret != X86EMUL_CONTINUE)
2758  return ret;
2759 
2760  ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2761  if (ret != X86EMUL_CONTINUE)
2762  return ret;
2763 
2764  if (old_tss_sel != 0xffff) {
2765  tss_seg.prev_task_link = old_tss_sel;
2766 
2767  ret = linear_write_system(ctxt, new_tss_base,
2768  &tss_seg.prev_task_link,
2769  sizeof(tss_seg.prev_task_link));
2770  if (ret != X86EMUL_CONTINUE)
2771  return ret;
2772  }
2773 
2774  return load_state_from_tss16(ctxt, &tss_seg);
2775 }
2776 
2777 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2778  struct tss_segment_32 *tss)
2779 {
2780  /* CR3 and the LDT selector are intentionally not saved */
2781  tss->eip = ctxt->_eip;
2782  tss->eflags = ctxt->eflags;
2783  tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2784  tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2785  tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2786  tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2787  tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2788  tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2789  tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2790  tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2791 
2792  tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2793  tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2794  tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2795  tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2796  tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2797  tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2798 }
2799 
2800 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2801  struct tss_segment_32 *tss)
2802 {
2803  int ret;
2804  u8 cpl;
2805 
2806  if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2807  return emulate_gp(ctxt, 0);
2808  ctxt->_eip = tss->eip;
2809  ctxt->eflags = tss->eflags | 2;
2810 
2811  /* General purpose registers */
2812  *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2813  *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2814  *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2815  *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2816  *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2817  *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2818  *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2819  *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2820 
2821  /*
2822  * SDM says that segment selectors are loaded before segment
2823  * descriptors. This is important because CPL checks will
2824  * use CS.RPL.
2825  */
2826  set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2827  set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2828  set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2829  set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2830  set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2831  set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2832  set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2833 
2834  /*
2835  * If we're switching between Protected Mode and VM86, we need to make
2836  * sure to update the mode before loading the segment descriptors so
2837  * that the selectors are interpreted correctly.
2838  */
2839  if (ctxt->eflags & X86_EFLAGS_VM) {
2840  ctxt->mode = X86EMUL_MODE_VM86;
2841  cpl = 3;
2842  } else {
2843  ctxt->mode = X86EMUL_MODE_PROT32;
2844  cpl = tss->cs & 3;
2845  }
2846 
2847  /*
2848  * Now load the segment descriptors. If a fault happens at this
2849  * stage, it is handled in the context of the new task.
2850  */
2851  ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2852  cpl, X86_TRANSFER_TASK_SWITCH, NULL);
2853  if (ret != X86EMUL_CONTINUE)
2854  return ret;
2855  ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2856  X86_TRANSFER_TASK_SWITCH, NULL);
2857  if (ret != X86EMUL_CONTINUE)
2858  return ret;
2859  ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2860  X86_TRANSFER_TASK_SWITCH, NULL);
2861  if (ret != X86EMUL_CONTINUE)
2862  return ret;
2863  ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2864  X86_TRANSFER_TASK_SWITCH, NULL);
2865  if (ret != X86EMUL_CONTINUE)
2866  return ret;
2867  ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2868  X86_TRANSFER_TASK_SWITCH, NULL);
2869  if (ret != X86EMUL_CONTINUE)
2870  return ret;
2871  ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2872  X86_TRANSFER_TASK_SWITCH, NULL);
2873  if (ret != X86EMUL_CONTINUE)
2874  return ret;
2875  ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2876  X86_TRANSFER_TASK_SWITCH, NULL);
2877 
2878  return ret;
2879 }
2880 
2881 static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel,
2882  ulong old_tss_base, struct desc_struct *new_desc)
2883 {
2884  struct tss_segment_32 tss_seg;
2885  int ret;
2886  u32 new_tss_base = get_desc_base(new_desc);
2887  u32 eip_offset = offsetof(struct tss_segment_32, eip);
2888  u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2889 
2890  ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
2891  if (ret != X86EMUL_CONTINUE)
2892  return ret;
2893 
2894  save_state_to_tss32(ctxt, &tss_seg);
2895 
2896  /* Only GP registers and segment selectors are saved */
2897  ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2898  ldt_sel_offset - eip_offset);
2899  if (ret != X86EMUL_CONTINUE)
2900  return ret;
2901 
2902  ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
2903  if (ret != X86EMUL_CONTINUE)
2904  return ret;
2905 
2906  if (old_tss_sel != 0xffff) {
2907  tss_seg.prev_task_link = old_tss_sel;
2908 
2909  ret = linear_write_system(ctxt, new_tss_base,
2910  &tss_seg.prev_task_link,
2911  sizeof(tss_seg.prev_task_link));
2912  if (ret != X86EMUL_CONTINUE)
2913  return ret;
2914  }
2915 
2916  return load_state_from_tss32(ctxt, &tss_seg);
2917 }
2918 
2919 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2920  u16 tss_selector, int idt_index, int reason,
2921  bool has_error_code, u32 error_code)
2922 {
2923  const struct x86_emulate_ops *ops = ctxt->ops;
2924  struct desc_struct curr_tss_desc, next_tss_desc;
2925  int ret;
2926  u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2927  ulong old_tss_base =
2928  ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2929  u32 desc_limit;
2930  ulong desc_addr, dr7;
2931 
2932  /* FIXME: old_tss_base == ~0 ? */
2933 
2934  ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2935  if (ret != X86EMUL_CONTINUE)
2936  return ret;
2937  ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2938  if (ret != X86EMUL_CONTINUE)
2939  return ret;
2940 
2941  /* FIXME: check that next_tss_desc is tss */
2942 
2943  /*
2944  * Check privileges. The three cases are task switch caused by...
2945  *
2946  * 1. jmp/call/int to task gate: Check against DPL of the task gate
2947  * 2. Exception/IRQ/iret: No check is performed
2948  * 3. jmp/call to TSS/task-gate: No check is performed since the
2949  * hardware checks it before exiting.
2950  */
2951  if (reason == TASK_SWITCH_GATE) {
2952  if (idt_index != -1) {
2953  /* Software interrupts */
2954  struct desc_struct task_gate_desc;
2955  int dpl;
2956 
2957  ret = read_interrupt_descriptor(ctxt, idt_index,
2958  &task_gate_desc);
2959  if (ret != X86EMUL_CONTINUE)
2960  return ret;
2961 
2962  dpl = task_gate_desc.dpl;
2963  if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2964  return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2965  }
2966  }
2967 
2968  desc_limit = desc_limit_scaled(&next_tss_desc);
2969  if (!next_tss_desc.p ||
2970  ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2971  desc_limit < 0x2b)) {
2972  return emulate_ts(ctxt, tss_selector & 0xfffc);
2973  }
2974 
2975  if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2976  curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2977  write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2978  }
2979 
2980  if (reason == TASK_SWITCH_IRET)
2981  ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2982 
2983  /* Set the back link to the previous task only if the NT bit is set
2984  in EFLAGS; note that old_tss_sel is not used after this point. */
2985  if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2986  old_tss_sel = 0xffff;
2987 
2988  if (next_tss_desc.type & 8)
2989  ret = task_switch_32(ctxt, old_tss_sel, old_tss_base, &next_tss_desc);
2990  else
2991  ret = task_switch_16(ctxt, old_tss_sel,
2992  old_tss_base, &next_tss_desc);
2993  if (ret != X86EMUL_CONTINUE)
2994  return ret;
2995 
2996  if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2997  ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2998 
2999  if (reason != TASK_SWITCH_IRET) {
3000  next_tss_desc.type |= (1 << 1); /* set busy flag */
3001  write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3002  }
3003 
3004  ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3005  ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3006 
3007  if (has_error_code) {
3008  ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3009  ctxt->lock_prefix = 0;
3010  ctxt->src.val = (unsigned long) error_code;
3011  ret = em_push(ctxt);
3012  }
3013 
3014  ops->get_dr(ctxt, 7, &dr7);
3015  ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3016 
3017  return ret;
3018 }
3019 
3020 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3021  u16 tss_selector, int idt_index, int reason,
3022  bool has_error_code, u32 error_code)
3023 {
3024  int rc;
3025 
3026  invalidate_registers(ctxt);
3027  ctxt->_eip = ctxt->eip;
3028  ctxt->dst.type = OP_NONE;
3029 
3030  rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3031  has_error_code, error_code);
3032 
3033  if (rc == X86EMUL_CONTINUE) {
3034  ctxt->eip = ctxt->_eip;
3035  writeback_registers(ctxt);
3036  }
3037 
3038  return rc;
3039 }
3040 
3041 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3042  struct operand *op)
3043 {
3044  int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3045 
3046  register_address_increment(ctxt, reg, df * op->bytes);
3047  op->addr.mem.ea = register_address(ctxt, reg);
3048 }
3049 
3050 static int em_das(struct x86_emulate_ctxt *ctxt)
3051 {
3052  u8 al, old_al;
3053  bool af, cf, old_cf;
3054 
3055  cf = ctxt->eflags & X86_EFLAGS_CF;
3056  al = ctxt->dst.val;
3057 
3058  old_al = al;
3059  old_cf = cf;
3060  cf = false;
3061  af = ctxt->eflags & X86_EFLAGS_AF;
3062  if ((al & 0x0f) > 9 || af) {
3063  al -= 6;
3064  cf = old_cf | (al >= 250);
3065  af = true;
3066  } else {
3067  af = false;
3068  }
3069  if (old_al > 0x99 || old_cf) {
3070  al -= 0x60;
3071  cf = true;
3072  }
3073 
3074  ctxt->dst.val = al;
3075  /* Set PF, ZF, SF */
3076  ctxt->src.type = OP_IMM;
3077  ctxt->src.val = 0;
3078  ctxt->src.bytes = 1;
3079  fastop(ctxt, em_or);
3080  ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3081  if (cf)
3082  ctxt->eflags |= X86_EFLAGS_CF;
3083  if (af)
3084  ctxt->eflags |= X86_EFLAGS_AF;
3085  return X86EMUL_CONTINUE;
3086 }
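
/*
 * Worked example for the BCD adjustment above: AL = 0x2e with CF = AF = 0.
 * The low nibble 0xe is greater than 9, so AL -= 6 gives 0x28 and AF is
 * set; the original AL (0x2e) is not above 0x99 and the old CF was clear,
 * so the 0x60 step is skipped, leaving AL = 0x28. The "al >= 250" test
 * catches the unsigned wrap of the 8-bit "al -= 6", i.e. the borrow that
 * must be folded into CF.
 */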
3087 
3088 static int em_aam(struct x86_emulate_ctxt *ctxt)
3089 {
3090  u8 al, ah;
3091 
3092  if (ctxt->src.val == 0)
3093  return emulate_de(ctxt);
3094 
3095  al = ctxt->dst.val & 0xff;
3096  ah = al / ctxt->src.val;
3097  al %= ctxt->src.val;
3098 
3099  ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3100 
3101  /* Set PF, ZF, SF */
3102  ctxt->src.type = OP_IMM;
3103  ctxt->src.val = 0;
3104  ctxt->src.bytes = 1;
3105  fastop(ctxt, em_or);
3106 
3107  return X86EMUL_CONTINUE;
3108 }
3109 
3110 static int em_aad(struct x86_emulate_ctxt *ctxt)
3111 {
3112  u8 al = ctxt->dst.val & 0xff;
3113  u8 ah = (ctxt->dst.val >> 8) & 0xff;
3114 
3115  al = (al + (ah * ctxt->src.val)) & 0xff;
3116 
3117  ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3118 
3119  /* Set PF, ZF, SF */
3120  ctxt->src.type = OP_IMM;
3121  ctxt->src.val = 0;
3122  ctxt->src.bytes = 1;
3123  fastop(ctxt, em_or);
3124 
3125  return X86EMUL_CONTINUE;
3126 }
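
/*
 * Worked examples with the implicit base-10 operand: AAM on AL = 63
 * (0x3f) computes AH = 63 / 10 = 6 and AL = 63 % 10 = 3, the unpacked
 * BCD digits; AAD on AH = 6, AL = 3 folds them back into AL = 6 * 10 + 3
 * = 63 and clears AH, which is what the 0xffff0000 mask above does before
 * AL is merged back in.
 */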
3127 
3128 static int em_call(struct x86_emulate_ctxt *ctxt)
3129 {
3130  int rc;
3131  long rel = ctxt->src.val;
3132 
3133  ctxt->src.val = (unsigned long)ctxt->_eip;
3134  rc = jmp_rel(ctxt, rel);
3135  if (rc != X86EMUL_CONTINUE)
3136  return rc;
3137  return em_push(ctxt);
3138 }
3139 
3140 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3141 {
3142  u16 sel, old_cs;
3143  ulong old_eip;
3144  int rc;
3145  struct desc_struct old_desc, new_desc;
3146  const struct x86_emulate_ops *ops = ctxt->ops;
3147  int cpl = ctxt->ops->cpl(ctxt);
3148  enum x86emul_mode prev_mode = ctxt->mode;
3149 
3150  old_eip = ctxt->_eip;
3151  ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3152 
3153  memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3154  rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3155  X86_TRANSFER_CALL_JMP, &new_desc);
3156  if (rc != X86EMUL_CONTINUE)
3157  return rc;
3158 
3159  rc = assign_eip_far(ctxt, ctxt->src.val);
3160  if (rc != X86EMUL_CONTINUE)
3161  goto fail;
3162 
3163  ctxt->src.val = old_cs;
3164  rc = em_push(ctxt);
3165  if (rc != X86EMUL_CONTINUE)
3166  goto fail;
3167 
3168  ctxt->src.val = old_eip;
3169  rc = em_push(ctxt);
3170  /* If we failed, we tainted the memory, but at the very least we
3171  should restore cs. */
3172  if (rc != X86EMUL_CONTINUE) {
3173  pr_warn_once("faulting far call emulation tainted memory\n");
3174  goto fail;
3175  }
3176  return rc;
3177 fail:
3178  ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3179  ctxt->mode = prev_mode;
3180  return rc;
3181 
3182 }
3183 
3184 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3185 {
3186  int rc;
3187  unsigned long eip;
3188 
3189  rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3190  if (rc != X86EMUL_CONTINUE)
3191  return rc;
3192  rc = assign_eip_near(ctxt, eip);
3193  if (rc != X86EMUL_CONTINUE)
3194  return rc;
3195  rsp_increment(ctxt, ctxt->src.val);
3196  return X86EMUL_CONTINUE;
3197 }
3198 
3199 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3200 {
3201  /* Write back the register source. */
3202  ctxt->src.val = ctxt->dst.val;
3203  write_register_operand(&ctxt->src);
3204 
3205  /* Write back the memory destination with implicit LOCK prefix. */
3206  ctxt->dst.val = ctxt->src.orig_val;
3207  ctxt->lock_prefix = 1;
3208  return X86EMUL_CONTINUE;
3209 }
3210 
3211 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3212 {
3213  ctxt->dst.val = ctxt->src2.val;
3214  return fastop(ctxt, em_imul);
3215 }
3216 
3217 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3218 {
3219  ctxt->dst.type = OP_REG;
3220  ctxt->dst.bytes = ctxt->src.bytes;
3221  ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3222  ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3223 
3224  return X86EMUL_CONTINUE;
3225 }
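
/*
 * The branch-free sign extension above: shifting the sign bit down to
 * bit 0 yields 0 or 1, subtracting 1 turns that into 0 or all-ones, and
 * the final NOT inverts it. For a 16-bit CWD with AX = 0x8000 that is
 * (0x8000 >> 15) = 1, 1 - 1 = 0, ~0 = all-ones, so DX becomes 0xffff;
 * for a non-negative AX, DX becomes 0.
 */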
3226 
3227 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3228 {
3229  u64 tsc_aux = 0;
3230 
3231  if (!ctxt->ops->guest_has_rdpid(ctxt))
3232  return emulate_ud(ctxt);
3233 
3234  ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux);
3235  ctxt->dst.val = tsc_aux;
3236  return X86EMUL_CONTINUE;
3237 }
3238 
3239 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3240 {
3241  u64 tsc = 0;
3242 
3243  ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3244  *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3245  *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3246  return X86EMUL_CONTINUE;
3247 }
3248 
3249 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3250 {
3251  u64 pmc;
3252 
3253  if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3254  return emulate_gp(ctxt, 0);
3255  *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3256  *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3257  return X86EMUL_CONTINUE;
3258 }
3259 
3260 static int em_mov(struct x86_emulate_ctxt *ctxt)
3261 {
3262  memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3263  return X86EMUL_CONTINUE;
3264 }
3265 
3266 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3267 {
3268  u16 tmp;
3269 
3270  if (!ctxt->ops->guest_has_movbe(ctxt))
3271  return emulate_ud(ctxt);
3272 
3273  switch (ctxt->op_bytes) {
3274  case 2:
3275  /*
3276  * From MOVBE definition: "...When the operand size is 16 bits,
3277  * the upper word of the destination register remains unchanged
3278  * ..."
3279  *
3280  * Casting either ->valptr or ->val to u16 breaks strict aliasing
3281  * rules, so we have to do the operation almost by hand.
3282  */
3283  tmp = (u16)ctxt->src.val;
3284  ctxt->dst.val &= ~0xffffUL;
3285  ctxt->dst.val |= (unsigned long)swab16(tmp);
3286  break;
3287  case 4:
3288  ctxt->dst.val = swab32((u32)ctxt->src.val);
3289  break;
3290  case 8:
3291  ctxt->dst.val = swab64(ctxt->src.val);
3292  break;
3293  default:
3294  BUG();
3295  }
3296  return X86EMUL_CONTINUE;
3297 }
3298 
3299 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3300 {
3301  int cr_num = ctxt->modrm_reg;
3302  int r;
3303 
3304  if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
3305  return emulate_gp(ctxt, 0);
3306 
3307  /* Disable writeback. */
3308  ctxt->dst.type = OP_NONE;
3309 
3310  if (cr_num == 0) {
3311  /*
3312  * CR0 write might have updated CR0.PE and/or CR0.PG
3313  * which can affect the cpu's execution mode.
3314  */
3315  r = emulator_recalc_and_set_mode(ctxt);
3316  if (r != X86EMUL_CONTINUE)
3317  return r;
3318  }
3319 
3320  return X86EMUL_CONTINUE;
3321 }
3322 
3323 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3324 {
3325  unsigned long val;
3326 
3327  if (ctxt->mode == X86EMUL_MODE_PROT64)
3328  val = ctxt->src.val & ~0ULL;
3329  else
3330  val = ctxt->src.val & ~0U;
3331 
3332  /* #UD condition is already handled. */
3333  if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3334  return emulate_gp(ctxt, 0);
3335 
3336  /* Disable writeback. */
3337  ctxt->dst.type = OP_NONE;
3338  return X86EMUL_CONTINUE;
3339 }
3340 
3341 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3342 {
3343  u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3344  u64 msr_data;
3345  int r;
3346 
3347  msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3348  | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3349  r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
3350 
3351  if (r == X86EMUL_PROPAGATE_FAULT)
3352  return emulate_gp(ctxt, 0);
3353 
3354  return r;
3355 }
3356 
3357 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3358 {
3359  u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3360  u64 msr_data;
3361  int r;
3362 
3363  r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
3364 
3365  if (r == X86EMUL_PROPAGATE_FAULT)
3366  return emulate_gp(ctxt, 0);
3367 
3368  if (r == X86EMUL_CONTINUE) {
3369  *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3370  *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3371  }
3372  return r;
3373 }
3374 
3375 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3376 {
3377  if (segment > VCPU_SREG_GS &&
3378  (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3379  ctxt->ops->cpl(ctxt) > 0)
3380  return emulate_gp(ctxt, 0);
3381 
3382  ctxt->dst.val = get_segment_selector(ctxt, segment);
3383  if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3384  ctxt->dst.bytes = 2;
3385  return X86EMUL_CONTINUE;
3386 }
3387 
3388 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3389 {
3390  if (ctxt->modrm_reg > VCPU_SREG_GS)
3391  return emulate_ud(ctxt);
3392 
3393  return em_store_sreg(ctxt, ctxt->modrm_reg);
3394 }
3395 
3396 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3397 {
3398  u16 sel = ctxt->src.val;
3399 
3400  if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3401  return emulate_ud(ctxt);
3402 
3403  if (ctxt->modrm_reg == VCPU_SREG_SS)
3404  ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3405 
3406  /* Disable writeback. */
3407  ctxt->dst.type = OP_NONE;
3408  return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3409 }
3410 
3411 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3412 {
3413  return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3414 }
3415 
3416 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3417 {
3418  u16 sel = ctxt->src.val;
3419 
3420  /* Disable writeback. */
3421  ctxt->dst.type = OP_NONE;
3422  return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3423 }
3424 
3425 static int em_str(struct x86_emulate_ctxt *ctxt)
3426 {
3427  return em_store_sreg(ctxt, VCPU_SREG_TR);
3428 }
3429 
3430 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3431 {
3432  u16 sel = ctxt->src.val;
3433 
3434  /* Disable writeback. */
3435  ctxt->dst.type = OP_NONE;
3436  return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3437 }
3438 
3439 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3440 {
3441  int rc;
3442  ulong linear;
3443  unsigned int max_size;
3444 
3445  rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode,
3446  &linear, X86EMUL_F_INVLPG);
3447  if (rc == X86EMUL_CONTINUE)
3448  ctxt->ops->invlpg(ctxt, linear);
3449  /* Disable writeback. */
3450  ctxt->dst.type = OP_NONE;
3451  return X86EMUL_CONTINUE;
3452 }
3453 
3454 static int em_clts(struct x86_emulate_ctxt *ctxt)
3455 {
3456  ulong cr0;
3457 
3458  cr0 = ctxt->ops->get_cr(ctxt, 0);
3459  cr0 &= ~X86_CR0_TS;
3460  ctxt->ops->set_cr(ctxt, 0, cr0);
3461  return X86EMUL_CONTINUE;
3462 }
3463 
3464 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3465 {
3466  int rc = ctxt->ops->fix_hypercall(ctxt);
3467 
3468  if (rc != X86EMUL_CONTINUE)
3469  return rc;
3470 
3471  /* Let the processor re-execute the fixed hypercall */
3472  ctxt->_eip = ctxt->eip;
3473  /* Disable writeback. */
3474  ctxt->dst.type = OP_NONE;
3475  return X86EMUL_CONTINUE;
3476 }
3477 
3478 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3479  void (*get)(struct x86_emulate_ctxt *ctxt,
3480  struct desc_ptr *ptr))
3481 {
3482  struct desc_ptr desc_ptr;
3483 
3484  if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3485  ctxt->ops->cpl(ctxt) > 0)
3486  return emulate_gp(ctxt, 0);
3487 
3488  if (ctxt->mode == X86EMUL_MODE_PROT64)
3489  ctxt->op_bytes = 8;
3490  get(ctxt, &desc_ptr);
3491  if (ctxt->op_bytes == 2) {
3492  ctxt->op_bytes = 4;
3493  desc_ptr.address &= 0x00ffffff;
3494  }
3495  /* Disable writeback. */
3496  ctxt->dst.type = OP_NONE;
3497  return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3498  &desc_ptr, 2 + ctxt->op_bytes);
3499 }
3500 
3501 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3502 {
3503  return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3504 }
3505 
3506 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3507 {
3508  return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3509 }
3510 
3511 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3512 {
3513  struct desc_ptr desc_ptr;
3514  int rc;
3515 
3516  if (ctxt->mode == X86EMUL_MODE_PROT64)
3517  ctxt->op_bytes = 8;
3518  rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3519  &desc_ptr.size, &desc_ptr.address,
3520  ctxt->op_bytes);
3521  if (rc != X86EMUL_CONTINUE)
3522  return rc;
3523  if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3524  emul_is_noncanonical_address(desc_ptr.address, ctxt))
3525  return emulate_gp(ctxt, 0);
3526  if (lgdt)
3527  ctxt->ops->set_gdt(ctxt, &desc_ptr);
3528  else
3529  ctxt->ops->set_idt(ctxt, &desc_ptr);
3530  /* Disable writeback. */
3531  ctxt->dst.type = OP_NONE;
3532  return X86EMUL_CONTINUE;
3533 }
3534 
3535 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3536 {
3537  return em_lgdt_lidt(ctxt, true);
3538 }
3539 
3540 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3541 {
3542  return em_lgdt_lidt(ctxt, false);
3543 }
3544 
3545 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3546 {
3547  if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3548  ctxt->ops->cpl(ctxt) > 0)
3549  return emulate_gp(ctxt, 0);
3550 
3551  if (ctxt->dst.type == OP_MEM)
3552  ctxt->dst.bytes = 2;
3553  ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3554  return X86EMUL_CONTINUE;
3555 }
3556 
3557 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3558 {
3559  ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3560  | (ctxt->src.val & 0x0f));
3561  ctxt->dst.type = OP_NONE;
3562  return X86EMUL_CONTINUE;
3563 }
3564 
3565 static int em_loop(struct x86_emulate_ctxt *ctxt)
3566 {
3567  int rc = X86EMUL_CONTINUE;
3568 
3569  register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3570  if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3571  (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3572  rc = jmp_rel(ctxt, ctxt->src.val);
3573 
3574  return rc;
3575 }
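
/*
 * The "ctxt->b ^ 0x5" above maps the LOOPcc opcodes onto the standard
 * condition-code table used by test_cc(): LOOPNE (0xe0) becomes
 * condition 5 ("ZF clear") and LOOPE (0xe1) becomes condition 4 ("ZF
 * set"), while plain LOOP (0xe2) short-circuits the test entirely.
 */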
3576 
3577 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3578 {
3579  int rc = X86EMUL_CONTINUE;
3580 
3581  if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3582  rc = jmp_rel(ctxt, ctxt->src.val);
3583 
3584  return rc;
3585 }
3586 
3587 static int em_in(struct x86_emulate_ctxt *ctxt)
3588 {
3589  if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3590  &ctxt->dst.val))
3591  return X86EMUL_IO_NEEDED;
3592 
3593  return X86EMUL_CONTINUE;
3594 }
3595 
3596 static int em_out(struct x86_emulate_ctxt *ctxt)
3597 {
3598  ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3599  &ctxt->src.val, 1);
3600  /* Disable writeback. */
3601  ctxt->dst.type = OP_NONE;
3602  return X86EMUL_CONTINUE;
3603 }
3604 
3605 static int em_cli(struct x86_emulate_ctxt *ctxt)
3606 {
3607  if (emulator_bad_iopl(ctxt))
3608  return emulate_gp(ctxt, 0);
3609 
3610  ctxt->eflags &= ~X86_EFLAGS_IF;
3611  return X86EMUL_CONTINUE;
3612 }
3613 
3614 static int em_sti(struct x86_emulate_ctxt *ctxt)
3615 {
3616  if (emulator_bad_iopl(ctxt))
3617  return emulate_gp(ctxt, 0);
3618 
3619  ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3620  ctxt->eflags |= X86_EFLAGS_IF;
3621  return X86EMUL_CONTINUE;
3622 }
3623 
3624 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3625 {
3626  u32 eax, ebx, ecx, edx;
3627  u64 msr = 0;
3628 
3629  ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3630  if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3631  ctxt->ops->cpl(ctxt)) {
3632  return emulate_gp(ctxt, 0);
3633  }
3634 
3635  eax = reg_read(ctxt, VCPU_REGS_RAX);
3636  ecx = reg_read(ctxt, VCPU_REGS_RCX);
3637  ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3638  *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3639  *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3640  *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3641  *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3642  return X86EMUL_CONTINUE;
3643 }
3644 
3645 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3646 {
3647  u32 flags;
3648 
3649  flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3650  X86_EFLAGS_SF;
3651  flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3652 
3653  ctxt->eflags &= ~0xffUL;
3654  ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3655  return X86EMUL_CONTINUE;
3656 }
3657 
3658 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3659 {
3660  *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3661  *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3662  return X86EMUL_CONTINUE;
3663 }
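
/*
 * SAHF and LAHF above move the low EFLAGS byte through AH: SF, ZF, AF,
 * PF and CF live at bits 7, 6, 4, 2 and 0, and bit 1 is the reserved
 * always-one bit that X86_EFLAGS_FIXED re-asserts on the SAHF path.
 */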
3664 
3665 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3666 {
3667  switch (ctxt->op_bytes) {
3668 #ifdef CONFIG_X86_64
3669  case 8:
3670  asm("bswap %0" : "+r"(ctxt->dst.val));
3671  break;
3672 #endif
3673  default:
3674  asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3675  break;
3676  }
3677  return X86EMUL_CONTINUE;
3678 }
3679 
3680 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3681 {
3682  /* emulating clflush regardless of cpuid */
3683  return X86EMUL_CONTINUE;
3684 }
3685 
3686 static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3687 {
3688  /* emulating clflushopt regardless of cpuid */
3689  return X86EMUL_CONTINUE;
3690 }
3691 
3692 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3693 {
3694  ctxt->dst.val = (s32) ctxt->src.val;
3695  return X86EMUL_CONTINUE;
3696 }
3697 
3698 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3699 {
3700  if (!ctxt->ops->guest_has_fxsr(ctxt))
3701  return emulate_ud(ctxt);
3702 
3703  if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3704  return emulate_nm(ctxt);
3705 
3706  /*
3707  * Rather than work around the lack of fxsave64/fxrstor64 on old
3708  * compilers, simply don't emulate a case that should never be hit.
3709  */
3710  if (ctxt->mode >= X86EMUL_MODE_PROT64)
3711  return X86EMUL_UNHANDLEABLE;
3712 
3713  return X86EMUL_CONTINUE;
3714 }
3715 
3716 /*
3717  * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3718  * and restore MXCSR.
3719  */
3720 static size_t __fxstate_size(int nregs)
3721 {
3722  return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
3723 }
3724 
3725 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
3726 {
3727  bool cr4_osfxsr;
3728  if (ctxt->mode == X86EMUL_MODE_PROT64)
3729  return __fxstate_size(16);
3730 
3731  cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
3732  return __fxstate_size(cr4_osfxsr ? 8 : 0);
3733 }
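
/*
 * Concrete numbers: xmm_space starts 160 bytes into struct fxregs_state
 * (the legacy x87 header plus ST0-ST7), so __fxstate_size() returns 160
 * when CR4.OSFXSR is clear (no XMM registers), 288 for the 8-register
 * 32-bit layout, and 416 for the full 16-register 64-bit layout.
 */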
3734 
3735 /*
3736  * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
3737  * 1) 16 bit mode
3738  * 2) 32 bit mode
3739  * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
3740  * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
3741  * save and restore
3742  * 3) 64-bit mode with REX.W prefix
3743  * - like (2), but XMM 8-15 are being saved and restored
3744  * 4) 64-bit mode without REX.W prefix
3745  * - like (3), but FIP and FDP are 64 bit
3746  *
3747  * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
3748  * desired result. (4) is not emulated.
3749  *
3750  * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
3751  * and FPU DS) should match.
3752  */
3753 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
3754 {
3755  struct fxregs_state fx_state;
3756  int rc;
3757 
3758  rc = check_fxsr(ctxt);
3759  if (rc != X86EMUL_CONTINUE)
3760  return rc;
3761 
3762  kvm_fpu_get();
3763 
3764  rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
3765 
3766  kvm_fpu_put();
3767 
3768  if (rc != X86EMUL_CONTINUE)
3769  return rc;
3770 
3771  return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
3772  fxstate_size(ctxt));
3773 }
3774 
3775 /*
3776  * FXRSTOR might restore XMM registers not provided by the guest. Fill
3777  * in the host registers (via FXSAVE) instead, so they won't be modified.
3778  * (preemption has to stay disabled until FXRSTOR).
3779  *
3780  * Use noinline to keep the stack for other functions called by callers small.
3781  */
3782 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
3783  const size_t used_size)
3784 {
3785  struct fxregs_state fx_tmp;
3786  int rc;
3787 
3788  rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
3789  memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
3790  __fxstate_size(16) - used_size);
3791 
3792  return rc;
3793 }
3794 
3795 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
3796 {
3797  struct fxregs_state fx_state;
3798  int rc;
3799  size_t size;
3800 
3801  rc = check_fxsr(ctxt);
3802  if (rc != X86EMUL_CONTINUE)
3803  return rc;
3804 
3805  size = fxstate_size(ctxt);
3806  rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
3807  if (rc != X86EMUL_CONTINUE)
3808  return rc;
3809 
3810  kvm_fpu_get();
3811 
3812  if (size < __fxstate_size(16)) {
3813  rc = fxregs_fixup(&fx_state, size);
3814  if (rc != X86EMUL_CONTINUE)
3815  goto out;
3816  }
3817 
3818  if (fx_state.mxcsr >> 16) {
3819  rc = emulate_gp(ctxt, 0);
3820  goto out;
3821  }
3822 
3823  if (rc == X86EMUL_CONTINUE)
3824  rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
3825 
3826 out:
3827  kvm_fpu_put();
3828 
3829  return rc;
3830 }
3831 
3832 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
3833 {
3834  u32 eax, ecx, edx;
3835 
3836  if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
3837  return emulate_ud(ctxt);
3838 
3839  eax = reg_read(ctxt, VCPU_REGS_RAX);
3840  edx = reg_read(ctxt, VCPU_REGS_RDX);
3841  ecx = reg_read(ctxt, VCPU_REGS_RCX);
3842 
3843  if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
3844  return emulate_gp(ctxt, 0);
3845 
3846  return X86EMUL_CONTINUE;
3847 }
3848 
3849 static bool valid_cr(int nr)
3850 {
3851  switch (nr) {
3852  case 0:
3853  case 2 ... 4:
3854  case 8:
3855  return true;
3856  default:
3857  return false;
3858  }
3859 }
3860 
3861 static int check_cr_access(struct x86_emulate_ctxt *ctxt)
3862 {
3863  if (!valid_cr(ctxt->modrm_reg))
3864  return emulate_ud(ctxt);
3865 
3866  return X86EMUL_CONTINUE;
3867 }
3868 
3869 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3870 {
3871  unsigned long dr7;
3872 
3873  ctxt->ops->get_dr(ctxt, 7, &dr7);
3874 
3875  return dr7 & DR7_GD;
3876 }
3877 
3878 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3879 {
3880  int dr = ctxt->modrm_reg;
3881  u64 cr4;
3882 
3883  if (dr > 7)
3884  return emulate_ud(ctxt);
3885 
3886  cr4 = ctxt->ops->get_cr(ctxt, 4);
3887  if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3888  return emulate_ud(ctxt);
3889 
3890  if (check_dr7_gd(ctxt)) {
3891  ulong dr6;
3892 
3893  ctxt->ops->get_dr(ctxt, 6, &dr6);
3894  dr6 &= ~DR_TRAP_BITS;
3895  dr6 |= DR6_BD | DR6_ACTIVE_LOW;
3896  ctxt->ops->set_dr(ctxt, 6, dr6);
3897  return emulate_db(ctxt);
3898  }
3899 
3900  return X86EMUL_CONTINUE;
3901 }
3902 
3903 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3904 {
3905  u64 new_val = ctxt->src.val64;
3906  int dr = ctxt->modrm_reg;
3907 
3908  if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3909  return emulate_gp(ctxt, 0);
3910 
3911  return check_dr_read(ctxt);
3912 }
3913 
3914 static int check_svme(struct x86_emulate_ctxt *ctxt)
3915 {
3916  u64 efer = 0;
3917 
3918  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3919 
3920  if (!(efer & EFER_SVME))
3921  return emulate_ud(ctxt);
3922 
3923  return X86EMUL_CONTINUE;
3924 }
3925 
3926 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3927 {
3928  u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3929 
3930  /* Valid physical address? */
3931  if (rax & 0xffff000000000000ULL)
3932  return emulate_gp(ctxt, 0);
3933 
3934  return check_svme(ctxt);
3935 }
3936 
3937 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3938 {
3939  u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3940 
3941  if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3942  return emulate_gp(ctxt, 0);
3943 
3944  return X86EMUL_CONTINUE;
3945 }
3946 
3947 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3948 {
3949  u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3950  u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3951 
 3952  /*
 3953  * VMware allows access to these Pseudo-PMCs even when read via RDPMC
 3954  * in Ring3 when CR4.PCE=0.
 3955  */
 3956  if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
 3957  return X86EMUL_CONTINUE;
 3958 
3959  /*
3960  * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
3961  * check however is unnecessary because CPL is always 0 outside
3962  * protected mode.
3963  */
3964  if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3965  ctxt->ops->check_pmc(ctxt, rcx))
3966  return emulate_gp(ctxt, 0);
3967 
3968  return X86EMUL_CONTINUE;
3969 }
3970 
3971 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3972 {
3973  ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3974  if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
3975  return emulate_gp(ctxt, 0);
3976 
3977  return X86EMUL_CONTINUE;
3978 }
3979 
3980 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3981 {
3982  ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3983  if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
3984  return emulate_gp(ctxt, 0);
3985 
3986  return X86EMUL_CONTINUE;
3987 }
3988 
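Both helpers above defer to emulator_io_permitted(), which ultimately consults the TSS I/O permission bitmap: an access is legal only if every bit covering the port range is clear. A standalone sketch of that test (illustrative function name):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: a port range is permitted only if every covering bit in
 * the TSS I/O permission bitmap is clear. */
static bool io_bitmap_permits(const uint8_t *bitmap, uint16_t port,
			      unsigned len)
{
	for (unsigned i = 0; i < len; i++) {
		uint16_t p = port + i;

		if (bitmap[p / 8] & (1u << (p % 8)))
			return false;	/* bit set => access denied */
	}
	return true;
}
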
3989 #define D(_y) { .flags = (_y) }
3990 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3991 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3992  .intercept = x86_intercept_##_i, .check_perm = (_p) }
3993 #define N D(NotImpl)
3994 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3995 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3996 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3997 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
3998 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
3999 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4000 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4001 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4002 #define II(_f, _e, _i) \
4003  { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4004 #define IIP(_f, _e, _i, _p) \
4005  { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4006  .intercept = x86_intercept_##_i, .check_perm = (_p) }
4007 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4008 
4009 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4010 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4011 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4012 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4013 #define I2bvIP(_f, _e, _i, _p) \
4014  IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4015 
4016 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4017  F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4018  F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
4019 
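The macros above expand to designated initializers for struct opcode; F(Lock, em_add), for example, becomes roughly { .flags = Lock | Fastop, .u.fastop = em_add }. A toy standalone version of the same tagged-dispatch-table pattern (all names invented for illustration):

#include <stdio.h>

struct mock_opcode {
	unsigned long flags;
	int (*execute)(void);
};

#define MOCK_I(_f, _e)	{ .flags = (_f), .execute = (_e) }
#define MOCK_N		{ .flags = 0, .execute = 0 }

static int mock_add(void) { puts("add"); return 0; }

static const struct mock_opcode mock_table[] = {
	MOCK_I(0x1, mock_add),	/* an implemented entry */
	MOCK_N,			/* "not implemented", like N above */
};

int main(void)
{
	for (unsigned b = 0; b < 2; b++)
		if (mock_table[b].execute)
			mock_table[b].execute();
	return 0;
}
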
4020 static const struct opcode group7_rm0[] = {
4021  N,
 4022  I(SrcNone | Priv | EmulateOnUD, em_hypercall),
 4023  N, N, N, N, N, N,
4024 };
4025 
4026 static const struct opcode group7_rm1[] = {
4027  DI(SrcNone | Priv, monitor),
4028  DI(SrcNone | Priv, mwait),
4029  N, N, N, N, N, N,
4030 };
4031 
4032 static const struct opcode group7_rm2[] = {
4033  N,
4034  II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4035  N, N, N, N, N, N,
4036 };
4037 
4038 static const struct opcode group7_rm3[] = {
4039  DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4040  II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4041  DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
 4042  DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
 4043  DIP(SrcNone | Prot | Priv, stgi, check_svme),
 4044  DIP(SrcNone | Prot | Priv, clgi, check_svme),
 4045  DIP(SrcNone | Prot | Priv, skinit, check_svme),
 4046  DIP(SrcNone | Prot | Priv, invlpga, check_svme),
 4047 };
4048 
4049 static const struct opcode group7_rm7[] = {
4050  N,
4051  DIP(SrcNone, rdtscp, check_rdtsc),
4052  N, N, N, N, N, N,
4053 };
4054 
4055 static const struct opcode group1[] = {
4056  F(Lock, em_add),
4057  F(Lock | PageTable, em_or),
4058  F(Lock, em_adc),
4059  F(Lock, em_sbb),
4060  F(Lock | PageTable, em_and),
4061  F(Lock, em_sub),
4062  F(Lock, em_xor),
4063  F(NoWrite, em_cmp),
4064 };
4065 
4066 static const struct opcode group1A[] = {
4067  I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4068 };
4069 
4070 static const struct opcode group2[] = {
4071  F(DstMem | ModRM, em_rol),
4072  F(DstMem | ModRM, em_ror),
4073  F(DstMem | ModRM, em_rcl),
4074  F(DstMem | ModRM, em_rcr),
4075  F(DstMem | ModRM, em_shl),
4076  F(DstMem | ModRM, em_shr),
4077  F(DstMem | ModRM, em_shl),
4078  F(DstMem | ModRM, em_sar),
4079 };
4080 
4081 static const struct opcode group3[] = {
4082  F(DstMem | SrcImm | NoWrite, em_test),
4083  F(DstMem | SrcImm | NoWrite, em_test),
4084  F(DstMem | SrcNone | Lock, em_not),
4085  F(DstMem | SrcNone | Lock, em_neg),
4086  F(DstXacc | Src2Mem, em_mul_ex),
4087  F(DstXacc | Src2Mem, em_imul_ex),
4088  F(DstXacc | Src2Mem, em_div_ex),
4089  F(DstXacc | Src2Mem, em_idiv_ex),
4090 };
4091 
4092 static const struct opcode group4[] = {
4093  F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4094  F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4095  N, N, N, N, N, N,
4096 };
4097 
4098 static const struct opcode group5[] = {
4099  F(DstMem | SrcNone | Lock, em_inc),
4100  F(DstMem | SrcNone | Lock, em_dec),
 4101  I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
 4102  I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
 4103  I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
 4104  I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
 4105  I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
 4106 };
4107 
4108 static const struct opcode group6[] = {
4109  II(Prot | DstMem, em_sldt, sldt),
4110  II(Prot | DstMem, em_str, str),
4111  II(Prot | Priv | SrcMem16, em_lldt, lldt),
4112  II(Prot | Priv | SrcMem16, em_ltr, ltr),
4113  N, N, N, N,
4114 };
4115 
4116 static const struct group_dual group7 = { {
4117  II(Mov | DstMem, em_sgdt, sgdt),
4118  II(Mov | DstMem, em_sidt, sidt),
4119  II(SrcMem | Priv, em_lgdt, lgdt),
4120  II(SrcMem | Priv, em_lidt, lidt),
4121  II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4122  II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4123  II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4124 }, {
4125  EXT(0, group7_rm0),
4126  EXT(0, group7_rm1),
4127  EXT(0, group7_rm2),
4128  EXT(0, group7_rm3),
4129  II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4130  II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4131  EXT(0, group7_rm7),
4132 } };
4133 
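Group tables such as group7 are indexed during decode by ModRM bits 5:3 (the /digit of opcode notation), and for a GroupDual the mod field chooses between the memory-form and register-form arrays. Two standalone helpers restating that indexing:

#include <stdint.h>

/* ModRM bits 5:3 select the entry within a group table. */
static unsigned group_offset(uint8_t modrm)
{
	return (modrm >> 3) & 7;
}

/* mod == 0b11 picks the register-form (mod3) side of a GroupDual. */
static int is_register_form(uint8_t modrm)
{
	return (modrm >> 6) == 3;
}
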
4134 static const struct opcode group8[] = {
4135  N, N, N, N,
4136  F(DstMem | SrcImmByte | NoWrite, em_bt),
4137  F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4138  F(DstMem | SrcImmByte | Lock, em_btr),
4139  F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4140 };
4141 
4142 /*
4143  * The "memory" destination is actually always a register, since we come
4144  * from the register case of group9.
4145  */
4146 static const struct gprefix pfx_0f_c7_7 = {
4147  N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4148 };
4149 
4150 
4151 static const struct group_dual group9 = { {
4152  N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4153 }, {
4154  N, N, N, N, N, N, N,
4155  GP(0, &pfx_0f_c7_7),
4156 } };
4157 
4158 static const struct opcode group11[] = {
4159  I(DstMem | SrcImm | Mov | PageTable, em_mov),
4160  X7(D(Undefined)),
4161 };
4162 
4163 static const struct gprefix pfx_0f_ae_7 = {
 4164  I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
 4165 };
4166 
4167 static const struct group_dual group15 = { {
4168  I(ModRM | Aligned16, em_fxsave),
 4169  I(ModRM | Aligned16, em_fxrstor),
 4170  N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4171 }, {
4172  N, N, N, N, N, N, N, N,
4173 } };
4174 
4175 static const struct gprefix pfx_0f_6f_0f_7f = {
4176  I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4177 };
4178 
4179 static const struct instr_dual instr_dual_0f_2b = {
4180  I(0, em_mov), N
4181 };
4182 
4183 static const struct gprefix pfx_0f_2b = {
4184  ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4185 };
4186 
4187 static const struct gprefix pfx_0f_10_0f_11 = {
4188  I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4189 };
4190 
4191 static const struct gprefix pfx_0f_28_0f_29 = {
4192  I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4193 };
4194 
4195 static const struct gprefix pfx_0f_e7 = {
4196  N, I(Sse, em_mov), N, N,
4197 };
4198 
4199 static const struct escape escape_d9 = { {
4200  N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4201 }, {
4202  /* 0xC0 - 0xC7 */
4203  N, N, N, N, N, N, N, N,
4204  /* 0xC8 - 0xCF */
4205  N, N, N, N, N, N, N, N,
 4206  /* 0xD0 - 0xD7 */
4207  N, N, N, N, N, N, N, N,
4208  /* 0xD8 - 0xDF */
4209  N, N, N, N, N, N, N, N,
4210  /* 0xE0 - 0xE7 */
4211  N, N, N, N, N, N, N, N,
4212  /* 0xE8 - 0xEF */
4213  N, N, N, N, N, N, N, N,
4214  /* 0xF0 - 0xF7 */
4215  N, N, N, N, N, N, N, N,
4216  /* 0xF8 - 0xFF */
4217  N, N, N, N, N, N, N, N,
4218 } };
4219 
4220 static const struct escape escape_db = { {
4221  N, N, N, N, N, N, N, N,
4222 }, {
4223  /* 0xC0 - 0xC7 */
4224  N, N, N, N, N, N, N, N,
4225  /* 0xC8 - 0xCF */
4226  N, N, N, N, N, N, N, N,
 4227  /* 0xD0 - 0xD7 */
4228  N, N, N, N, N, N, N, N,
4229  /* 0xD8 - 0xDF */
4230  N, N, N, N, N, N, N, N,
4231  /* 0xE0 - 0xE7 */
4232  N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4233  /* 0xE8 - 0xEF */
4234  N, N, N, N, N, N, N, N,
4235  /* 0xF0 - 0xF7 */
4236  N, N, N, N, N, N, N, N,
4237  /* 0xF8 - 0xFF */
4238  N, N, N, N, N, N, N, N,
4239 } };
4240 
4241 static const struct escape escape_dd = { {
4242  N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4243 }, {
4244  /* 0xC0 - 0xC7 */
4245  N, N, N, N, N, N, N, N,
4246  /* 0xC8 - 0xCF */
4247  N, N, N, N, N, N, N, N,
 4248  /* 0xD0 - 0xD7 */
4249  N, N, N, N, N, N, N, N,
4250  /* 0xD8 - 0xDF */
4251  N, N, N, N, N, N, N, N,
4252  /* 0xE0 - 0xE7 */
4253  N, N, N, N, N, N, N, N,
4254  /* 0xE8 - 0xEF */
4255  N, N, N, N, N, N, N, N,
4256  /* 0xF0 - 0xF7 */
4257  N, N, N, N, N, N, N, N,
4258  /* 0xF8 - 0xFF */
4259  N, N, N, N, N, N, N, N,
4260 } };
4261 
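The Escape tables model the x87 D8-DF opcode space: memory forms dispatch on the ModRM reg field into op[8], register forms on the whole byte relative to 0xC0 into high[64] (the decode loop below additionally applies array_index_nospec to that offset). A standalone restatement:

#include <stdint.h>

/* Sketch of Escape-table indexing for x87 opcodes D8-DF. */
static unsigned escape_index(uint8_t modrm, int *reg_form)
{
	if (modrm > 0xbf) {
		*reg_form = 1;
		return modrm - 0xc0;	/* index into high[64] */
	}
	*reg_form = 0;
	return (modrm >> 3) & 7;	/* index into op[8] */
}
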
4262 static const struct instr_dual instr_dual_0f_c3 = {
4263  I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4264 };
4265 
4266 static const struct mode_dual mode_dual_63 = {
4267  N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4268 };
4269 
4270 static const struct instr_dual instr_dual_8d = {
4271  D(DstReg | SrcMem | ModRM | NoAccess), N
4272 };
4273 
4274 static const struct opcode opcode_table[256] = {
4275  /* 0x00 - 0x07 */
4276  F6ALU(Lock, em_add),
4279  /* 0x08 - 0x0F */
4280  F6ALU(Lock | PageTable, em_or),
4282  N,
4283  /* 0x10 - 0x17 */
4284  F6ALU(Lock, em_adc),
4287  /* 0x18 - 0x1F */
4288  F6ALU(Lock, em_sbb),
4291  /* 0x20 - 0x27 */
4292  F6ALU(Lock | PageTable, em_and), N, N,
4293  /* 0x28 - 0x2F */
4294  F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4295  /* 0x30 - 0x37 */
4296  F6ALU(Lock, em_xor), N, N,
4297  /* 0x38 - 0x3F */
4298  F6ALU(NoWrite, em_cmp), N, N,
4299  /* 0x40 - 0x4F */
4300  X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4301  /* 0x50 - 0x57 */
4302  X8(I(SrcReg | Stack, em_push)),
4303  /* 0x58 - 0x5F */
4304  X8(I(DstReg | Stack, em_pop)),
4305  /* 0x60 - 0x67 */
 4306  I(ImplicitOps | Stack | No64, em_pusha),
 4307  I(ImplicitOps | Stack | No64, em_popa),
4308  N, MD(ModRM, &mode_dual_63),
4309  N, N, N, N,
4310  /* 0x68 - 0x6F */
4311  I(SrcImm | Mov | Stack, em_push),
4313  I(SrcImmByte | Mov | Stack, em_push),
4315  I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4316  I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4317  /* 0x70 - 0x7F */
4319  /* 0x80 - 0x87 */
4320  G(ByteOp | DstMem | SrcImm, group1),
4321  G(DstMem | SrcImm, group1),
4322  G(ByteOp | DstMem | SrcImm | No64, group1),
4323  G(DstMem | SrcImmByte, group1),
4324  F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4326  /* 0x88 - 0x8F */
4328  I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4330  ID(0, &instr_dual_8d),
4332  G(0, group1A),
4333  /* 0x90 - 0x97 */
4334  DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4335  /* 0x98 - 0x9F */
4339  II(ImplicitOps | Stack, em_popf, popf),
4341  /* 0xA0 - 0xA7 */
4342  I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4344  I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4345  F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4346  /* 0xA8 - 0xAF */
4347  F2bv(DstAcc | SrcImm | NoWrite, em_test),
4348  I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4349  I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4350  F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4351  /* 0xB0 - 0xB7 */
4352  X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4353  /* 0xB8 - 0xBF */
4354  X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4355  /* 0xC0 - 0xC7 */
4361  G(ByteOp, group11), G(0, group11),
4362  /* 0xC8 - 0xCF */
4364  I(Stack | IsBranch, em_leave),
4367  D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4368  D(ImplicitOps | No64 | IsBranch),
4369  II(ImplicitOps | IsBranch, em_iret, iret),
4370  /* 0xD0 - 0xD7 */
4372  G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4373  I(DstAcc | SrcImmUByte | No64, em_aam),
4374  I(DstAcc | SrcImmUByte | No64, em_aad),
4375  F(DstAcc | ByteOp | No64, em_salc),
4376  I(DstAcc | SrcXLat | ByteOp, em_mov),
4377  /* 0xD8 - 0xDF */
4378  N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4379  /* 0xE0 - 0xE7 */
4384  /* 0xE8 - 0xEF */
4391  /* 0xF0 - 0xF7 */
4392  N, DI(ImplicitOps, icebp), N, N,
4393  DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4394  G(ByteOp, group3), G(0, group3),
4395  /* 0xF8 - 0xFF */
4398  D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4399 };
4400 
4401 static const struct opcode twobyte_table[256] = {
4402  /* 0x00 - 0x0F */
4403  G(0, group6), GD(0, &group7), N, N,
4405  II(ImplicitOps | Priv, em_clts, clts), N,
4406  DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4407  N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4408  /* 0x10 - 0x1F */
4411  N, N, N, N, N, N,
4412  D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4413  D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4414  D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4415  D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4416  D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4417  D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4418  /* 0x20 - 0x2F */
4419  DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4420  DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4421  IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4422  check_cr_access),
4423  IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4424  check_dr_write),
4425  N, N, N, N,
4428  N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4429  N, N, N, N,
4430  /* 0x30 - 0x3F */
4431  II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4432  IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4433  II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4434  IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4437  N, N,
4438  N, N, N, N, N, N, N, N,
4439  /* 0x40 - 0x4F */
4440  X16(D(DstReg | SrcMem | ModRM)),
4441  /* 0x50 - 0x5F */
4442  N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4443  /* 0x60 - 0x6F */
4444  N, N, N, N,
4445  N, N, N, N,
4446  N, N, N, N,
4447  N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4448  /* 0x70 - 0x7F */
4449  N, N, N, N,
4450  N, N, N, N,
4451  N, N, N, N,
4452  N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4453  /* 0x80 - 0x8F */
4454  X16(D(SrcImm | NearBranch | IsBranch)),
4455  /* 0x90 - 0x9F */
4456  X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4457  /* 0xA0 - 0xA7 */
4459  II(ImplicitOps, em_cpuid, cpuid),
4460  F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4461  F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4462  F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4463  /* 0xA8 - 0xAF */
4465  II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4466  F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4467  F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4468  F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4469  GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4470  /* 0xB0 - 0xB7 */
4473  F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4476  D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4477  /* 0xB8 - 0xBF */
4478  N, N,
4479  G(BitOp, group8),
4480  F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4481  I(DstReg | SrcMem | ModRM, em_bsf_c),
4482  I(DstReg | SrcMem | ModRM, em_bsr_c),
4483  D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4484  /* 0xC0 - 0xC7 */
4485  F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4486  N, ID(0, &instr_dual_0f_c3),
4487  N, N, N, GD(0, &group9),
4488  /* 0xC8 - 0xCF */
4489  X8(I(DstReg, em_bswap)),
4490  /* 0xD0 - 0xDF */
4491  N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4492  /* 0xE0 - 0xEF */
4493  N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4494  N, N, N, N, N, N, N, N,
4495  /* 0xF0 - 0xFF */
4496  N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4497 };
4498 
4499 static const struct instr_dual instr_dual_0f_38_f0 = {
4500  I(DstReg | SrcMem | Mov, em_movbe), N
4501 };
4502 
4503 static const struct instr_dual instr_dual_0f_38_f1 = {
4504  I(DstMem | SrcReg | Mov, em_movbe), N
4505 };
4506 
4507 static const struct gprefix three_byte_0f_38_f0 = {
4508  ID(0, &instr_dual_0f_38_f0), N, N, N
4509 };
4510 
4511 static const struct gprefix three_byte_0f_38_f1 = {
4512  ID(0, &instr_dual_0f_38_f1), N, N, N
4513 };
4514 
 4515 /*
 4516  * The instructions below are indexed by the third opcode byte and
 4517  * selected by the mandatory (66/F2/F3) prefix, if any.
 4518  */
4519 static const struct opcode opcode_map_0f_38[256] = {
4520  /* 0x00 - 0x7f */
4521  X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4522  /* 0x80 - 0xef */
4523  X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4524  /* 0xf0 - 0xf1 */
 4525  GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
 4526  GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
 4527  /* 0xf2 - 0xff */
4528  N, N, X4(N), X8(N)
4529 };
4530 
4531 #undef D
4532 #undef N
4533 #undef G
4534 #undef GD
4535 #undef I
4536 #undef GP
4537 #undef EXT
4538 #undef MD
4539 #undef ID
4540 
4541 #undef D2bv
4542 #undef D2bvIP
4543 #undef I2bv
4544 #undef I2bvIP
 4545 #undef F6ALU
4546 
4547 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4548 {
4549  unsigned size;
4550 
4551  size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4552  if (size == 8)
4553  size = 4;
4554  return size;
4555 }
4556 
4557 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4558  unsigned size, bool sign_extension)
4559 {
4560  int rc = X86EMUL_CONTINUE;
4561 
4562  op->type = OP_IMM;
4563  op->bytes = size;
4564  op->addr.mem.ea = ctxt->_eip;
4565  /* NB. Immediates are sign-extended as necessary. */
4566  switch (op->bytes) {
4567  case 1:
4568  op->val = insn_fetch(s8, ctxt);
4569  break;
4570  case 2:
4571  op->val = insn_fetch(s16, ctxt);
4572  break;
4573  case 4:
4574  op->val = insn_fetch(s32, ctxt);
4575  break;
4576  case 8:
4577  op->val = insn_fetch(s64, ctxt);
4578  break;
4579  }
4580  if (!sign_extension) {
4581  switch (op->bytes) {
4582  case 1:
4583  op->val &= 0xff;
4584  break;
4585  case 2:
4586  op->val &= 0xffff;
4587  break;
4588  case 4:
4589  op->val &= 0xffffffff;
4590  break;
4591  }
4592  }
4593 done:
4594  return rc;
4595 }
4596 
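decode_imm() always fetches immediates sign-extended and masks them back down when the operand type calls for zero extension; imm_size() caps 64-bit operations at a 4-byte immediate, which callers later sign-extend. A standalone sketch of the extension rule:

#include <stdint.h>

/* Sketch: immediates are fetched sign-extended, then masked back
 * down when the operand type asks for zero extension. */
static uint64_t extend_imm(int64_t sext_val, unsigned bytes, int sign_extend)
{
	if (sign_extend)
		return (uint64_t)sext_val;

	switch (bytes) {
	case 1: return (uint64_t)sext_val & 0xff;
	case 2: return (uint64_t)sext_val & 0xffff;
	case 4: return (uint64_t)sext_val & 0xffffffff;
	default: return (uint64_t)sext_val;
	}
}
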
4597 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4598  unsigned d)
4599 {
4600  int rc = X86EMUL_CONTINUE;
4601 
4602  switch (d) {
4603  case OpReg:
4604  decode_register_operand(ctxt, op);
4605  break;
4606  case OpImmUByte:
4607  rc = decode_imm(ctxt, op, 1, false);
4608  break;
4609  case OpMem:
4610  ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4611  mem_common:
4612  *op = ctxt->memop;
4613  ctxt->memopp = op;
4614  if (ctxt->d & BitOp)
4615  fetch_bit_operand(ctxt);
4616  op->orig_val = op->val;
4617  break;
4618  case OpMem64:
4619  ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4620  goto mem_common;
4621  case OpAcc:
4622  op->type = OP_REG;
4623  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4624  op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
 4625  fetch_register_operand(op);
 4626  op->orig_val = op->val;
4627  break;
4628  case OpAccLo:
4629  op->type = OP_REG;
4630  op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4631  op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
 4632  fetch_register_operand(op);
 4633  op->orig_val = op->val;
4634  break;
4635  case OpAccHi:
4636  if (ctxt->d & ByteOp) {
4637  op->type = OP_NONE;
4638  break;
4639  }
4640  op->type = OP_REG;
4641  op->bytes = ctxt->op_bytes;
4642  op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
 4643  fetch_register_operand(op);
 4644  op->orig_val = op->val;
4645  break;
4646  case OpDI:
4647  op->type = OP_MEM;
4648  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4649  op->addr.mem.ea =
4650  register_address(ctxt, VCPU_REGS_RDI);
4651  op->addr.mem.seg = VCPU_SREG_ES;
4652  op->val = 0;
4653  op->count = 1;
4654  break;
4655  case OpDX:
4656  op->type = OP_REG;
4657  op->bytes = 2;
4658  op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
 4659  fetch_register_operand(op);
 4660  break;
4661  case OpCL:
4662  op->type = OP_IMM;
4663  op->bytes = 1;
4664  op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4665  break;
4666  case OpImmByte:
4667  rc = decode_imm(ctxt, op, 1, true);
4668  break;
4669  case OpOne:
4670  op->type = OP_IMM;
4671  op->bytes = 1;
4672  op->val = 1;
4673  break;
4674  case OpImm:
4675  rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4676  break;
4677  case OpImm64:
4678  rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4679  break;
4680  case OpMem8:
4681  ctxt->memop.bytes = 1;
4682  if (ctxt->memop.type == OP_REG) {
4683  ctxt->memop.addr.reg = decode_register(ctxt,
4684  ctxt->modrm_rm, true);
4685  fetch_register_operand(&ctxt->memop);
4686  }
4687  goto mem_common;
4688  case OpMem16:
4689  ctxt->memop.bytes = 2;
4690  goto mem_common;
4691  case OpMem32:
4692  ctxt->memop.bytes = 4;
4693  goto mem_common;
4694  case OpImmU16:
4695  rc = decode_imm(ctxt, op, 2, false);
4696  break;
4697  case OpImmU:
4698  rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4699  break;
4700  case OpSI:
4701  op->type = OP_MEM;
4702  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4703  op->addr.mem.ea =
4704  register_address(ctxt, VCPU_REGS_RSI);
4705  op->addr.mem.seg = ctxt->seg_override;
4706  op->val = 0;
4707  op->count = 1;
4708  break;
4709  case OpXLat:
4710  op->type = OP_MEM;
4711  op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4712  op->addr.mem.ea =
4713  address_mask(ctxt,
4714  reg_read(ctxt, VCPU_REGS_RBX) +
4715  (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4716  op->addr.mem.seg = ctxt->seg_override;
4717  op->val = 0;
4718  break;
4719  case OpImmFAddr:
4720  op->type = OP_IMM;
4721  op->addr.mem.ea = ctxt->_eip;
4722  op->bytes = ctxt->op_bytes + 2;
4723  insn_fetch_arr(op->valptr, op->bytes, ctxt);
4724  break;
4725  case OpMemFAddr:
4726  ctxt->memop.bytes = ctxt->op_bytes + 2;
4727  goto mem_common;
4728  case OpES:
4729  op->type = OP_IMM;
4730  op->val = VCPU_SREG_ES;
4731  break;
4732  case OpCS:
4733  op->type = OP_IMM;
4734  op->val = VCPU_SREG_CS;
4735  break;
4736  case OpSS:
4737  op->type = OP_IMM;
4738  op->val = VCPU_SREG_SS;
4739  break;
4740  case OpDS:
4741  op->type = OP_IMM;
4742  op->val = VCPU_SREG_DS;
4743  break;
4744  case OpFS:
4745  op->type = OP_IMM;
4746  op->val = VCPU_SREG_FS;
4747  break;
4748  case OpGS:
4749  op->type = OP_IMM;
4750  op->val = VCPU_SREG_GS;
4751  break;
4752  case OpImplicit:
4753  /* Special instructions do their own operand decoding. */
4754  default:
4755  op->type = OP_NONE; /* Disable writeback. */
4756  break;
4757  }
4758 
4759 done:
4760  return rc;
4761 }
4762 
4763 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
4764 {
4765  int rc = X86EMUL_CONTINUE;
4766  int mode = ctxt->mode;
4767  int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4768  bool op_prefix = false;
4769  bool has_seg_override = false;
4770  struct opcode opcode;
4771  u16 dummy;
4772  struct desc_struct desc;
4773 
4774  ctxt->memop.type = OP_NONE;
4775  ctxt->memopp = NULL;
4776  ctxt->_eip = ctxt->eip;
4777  ctxt->fetch.ptr = ctxt->fetch.data;
4778  ctxt->fetch.end = ctxt->fetch.data + insn_len;
4779  ctxt->opcode_len = 1;
4780  ctxt->intercept = x86_intercept_none;
4781  if (insn_len > 0)
4782  memcpy(ctxt->fetch.data, insn, insn_len);
4783  else {
4784  rc = __do_insn_fetch_bytes(ctxt, 1);
4785  if (rc != X86EMUL_CONTINUE)
4786  goto done;
4787  }
4788 
4789  switch (mode) {
4790  case X86EMUL_MODE_REAL:
4791  case X86EMUL_MODE_VM86:
4792  def_op_bytes = def_ad_bytes = 2;
4793  ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4794  if (desc.d)
4795  def_op_bytes = def_ad_bytes = 4;
4796  break;
4797  case X86EMUL_MODE_PROT16:
4798  def_op_bytes = def_ad_bytes = 2;
4799  break;
4800  case X86EMUL_MODE_PROT32:
4801  def_op_bytes = def_ad_bytes = 4;
4802  break;
4803 #ifdef CONFIG_X86_64
4804  case X86EMUL_MODE_PROT64:
4805  def_op_bytes = 4;
4806  def_ad_bytes = 8;
4807  break;
4808 #endif
4809  default:
4810  return EMULATION_FAILED;
4811  }
4812 
4813  ctxt->op_bytes = def_op_bytes;
4814  ctxt->ad_bytes = def_ad_bytes;
4815 
4816  /* Legacy prefixes. */
4817  for (;;) {
4818  switch (ctxt->b = insn_fetch(u8, ctxt)) {
4819  case 0x66: /* operand-size override */
4820  op_prefix = true;
4821  /* switch between 2/4 bytes */
4822  ctxt->op_bytes = def_op_bytes ^ 6;
4823  break;
4824  case 0x67: /* address-size override */
4825  if (mode == X86EMUL_MODE_PROT64)
4826  /* switch between 4/8 bytes */
4827  ctxt->ad_bytes = def_ad_bytes ^ 12;
4828  else
4829  /* switch between 2/4 bytes */
4830  ctxt->ad_bytes = def_ad_bytes ^ 6;
4831  break;
4832  case 0x26: /* ES override */
4833  has_seg_override = true;
4834  ctxt->seg_override = VCPU_SREG_ES;
4835  break;
4836  case 0x2e: /* CS override */
4837  has_seg_override = true;
4838  ctxt->seg_override = VCPU_SREG_CS;
4839  break;
4840  case 0x36: /* SS override */
4841  has_seg_override = true;
4842  ctxt->seg_override = VCPU_SREG_SS;
4843  break;
4844  case 0x3e: /* DS override */
4845  has_seg_override = true;
4846  ctxt->seg_override = VCPU_SREG_DS;
4847  break;
4848  case 0x64: /* FS override */
4849  has_seg_override = true;
4850  ctxt->seg_override = VCPU_SREG_FS;
4851  break;
4852  case 0x65: /* GS override */
4853  has_seg_override = true;
4854  ctxt->seg_override = VCPU_SREG_GS;
4855  break;
4856  case 0x40 ... 0x4f: /* REX */
4857  if (mode != X86EMUL_MODE_PROT64)
4858  goto done_prefixes;
4859  ctxt->rex_prefix = ctxt->b;
4860  continue;
4861  case 0xf0: /* LOCK */
4862  ctxt->lock_prefix = 1;
4863  break;
4864  case 0xf2: /* REPNE/REPNZ */
4865  case 0xf3: /* REP/REPE/REPZ */
4866  ctxt->rep_prefix = ctxt->b;
4867  break;
4868  default:
4869  goto done_prefixes;
4870  }
4871 
4872  /* Any legacy prefix after a REX prefix nullifies its effect. */
4873 
4874  ctxt->rex_prefix = 0;
4875  }
4876 
4877 done_prefixes:
4878 
4879  /* REX prefix. */
4880  if (ctxt->rex_prefix & 8)
4881  ctxt->op_bytes = 8; /* REX.W */
4882 
4883  /* Opcode byte(s). */
4884  opcode = opcode_table[ctxt->b];
4885  /* Two-byte opcode? */
4886  if (ctxt->b == 0x0f) {
4887  ctxt->opcode_len = 2;
4888  ctxt->b = insn_fetch(u8, ctxt);
4889  opcode = twobyte_table[ctxt->b];
4890 
4891  /* 0F_38 opcode map */
4892  if (ctxt->b == 0x38) {
4893  ctxt->opcode_len = 3;
4894  ctxt->b = insn_fetch(u8, ctxt);
4895  opcode = opcode_map_0f_38[ctxt->b];
4896  }
4897  }
4898  ctxt->d = opcode.flags;
4899 
4900  if (ctxt->d & ModRM)
4901  ctxt->modrm = insn_fetch(u8, ctxt);
4902 
4903  /* vex-prefix instructions are not implemented */
4904  if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4905  (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4906  ctxt->d = NotImpl;
4907  }
4908 
4909  while (ctxt->d & GroupMask) {
4910  switch (ctxt->d & GroupMask) {
4911  case Group:
4912  goffset = (ctxt->modrm >> 3) & 7;
4913  opcode = opcode.u.group[goffset];
4914  break;
4915  case GroupDual:
4916  goffset = (ctxt->modrm >> 3) & 7;
4917  if ((ctxt->modrm >> 6) == 3)
4918  opcode = opcode.u.gdual->mod3[goffset];
4919  else
4920  opcode = opcode.u.gdual->mod012[goffset];
4921  break;
4922  case RMExt:
4923  goffset = ctxt->modrm & 7;
4924  opcode = opcode.u.group[goffset];
4925  break;
4926  case Prefix:
4927  if (ctxt->rep_prefix && op_prefix)
4928  return EMULATION_FAILED;
4929  simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4930  switch (simd_prefix) {
4931  case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4932  case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4933  case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4934  case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4935  }
4936  break;
4937  case Escape:
4938  if (ctxt->modrm > 0xbf) {
4939  size_t size = ARRAY_SIZE(opcode.u.esc->high);
4940  u32 index = array_index_nospec(
4941  ctxt->modrm - 0xc0, size);
4942 
4943  opcode = opcode.u.esc->high[index];
4944  } else {
4945  opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4946  }
4947  break;
4948  case InstrDual:
4949  if ((ctxt->modrm >> 6) == 3)
4950  opcode = opcode.u.idual->mod3;
4951  else
4952  opcode = opcode.u.idual->mod012;
4953  break;
4954  case ModeDual:
4955  if (ctxt->mode == X86EMUL_MODE_PROT64)
4956  opcode = opcode.u.mdual->mode64;
4957  else
4958  opcode = opcode.u.mdual->mode32;
4959  break;
4960  default:
4961  return EMULATION_FAILED;
4962  }
4963 
4964  ctxt->d &= ~(u64)GroupMask;
4965  ctxt->d |= opcode.flags;
4966  }
4967 
4968  ctxt->is_branch = opcode.flags & IsBranch;
4969 
4970  /* Unrecognised? */
4971  if (ctxt->d == 0)
4972  return EMULATION_FAILED;
4973 
4974  ctxt->execute = opcode.u.execute;
4975 
4976  if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4977  likely(!(ctxt->d & EmulateOnUD)))
4978  return EMULATION_FAILED;
4979 
 4980  if (unlikely(ctxt->d &
 4981  (NotImpl | Stack | Op3264 | Sse | Mmx | Intercept | CheckPerm | NearBranch |
 4982  No16))) {
4983  /*
4984  * These are copied unconditionally here, and checked unconditionally
4985  * in x86_emulate_insn.
4986  */
4987  ctxt->check_perm = opcode.check_perm;
4988  ctxt->intercept = opcode.intercept;
4989 
4990  if (ctxt->d & NotImpl)
4991  return EMULATION_FAILED;
4992 
4993  if (mode == X86EMUL_MODE_PROT64) {
4994  if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4995  ctxt->op_bytes = 8;
4996  else if (ctxt->d & NearBranch)
4997  ctxt->op_bytes = 8;
4998  }
4999 
5000  if (ctxt->d & Op3264) {
5001  if (mode == X86EMUL_MODE_PROT64)
5002  ctxt->op_bytes = 8;
5003  else
5004  ctxt->op_bytes = 4;
5005  }
5006 
5007  if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5008  ctxt->op_bytes = 4;
5009 
5010  if (ctxt->d & Sse)
5011  ctxt->op_bytes = 16;
5012  else if (ctxt->d & Mmx)
5013  ctxt->op_bytes = 8;
5014  }
5015 
5016  /* ModRM and SIB bytes. */
5017  if (ctxt->d & ModRM) {
5018  rc = decode_modrm(ctxt, &ctxt->memop);
5019  if (!has_seg_override) {
5020  has_seg_override = true;
5021  ctxt->seg_override = ctxt->modrm_seg;
5022  }
5023  } else if (ctxt->d & MemAbs)
5024  rc = decode_abs(ctxt, &ctxt->memop);
5025  if (rc != X86EMUL_CONTINUE)
5026  goto done;
5027 
5028  if (!has_seg_override)
5029  ctxt->seg_override = VCPU_SREG_DS;
5030 
5031  ctxt->memop.addr.mem.seg = ctxt->seg_override;
5032 
5033  /*
5034  * Decode and fetch the source operand: register, memory
5035  * or immediate.
5036  */
5037  rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5038  if (rc != X86EMUL_CONTINUE)
5039  goto done;
5040 
5041  /*
5042  * Decode and fetch the second source operand: register, memory
5043  * or immediate.
5044  */
5045  rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5046  if (rc != X86EMUL_CONTINUE)
5047  goto done;
5048 
5049  /* Decode and fetch the destination operand: register or memory. */
5050  rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5051 
5052  if (ctxt->rip_relative && likely(ctxt->memopp))
5053  ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5054  ctxt->memopp->addr.mem.ea + ctxt->_eip);
5055 
5056 done:
5057  if (rc == X86EMUL_PROPAGATE_FAULT)
5058  ctxt->have_exception = true;
5059  return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5060 }
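
A condensed standalone sketch of the prefix scan implemented by the legacy-prefix loop above (invented names; the real decode also records which segment override and REP variant it saw): legacy prefixes and REX bytes are consumed until the first opcode byte, and any legacy prefix seen after a REX byte cancels it.

#include <stddef.h>
#include <stdint.h>

struct prefix_state {
	uint8_t rex;			/* last REX byte, 0 if none */
	int op_size_override;		/* 0x66 seen */
	int addr_size_override;		/* 0x67 seen */
};

/* Returns the offset of the first opcode byte. */
static size_t scan_prefixes(const uint8_t *insn, size_t len, int long_mode,
			    struct prefix_state *st)
{
	size_t i;

	for (i = 0; i < len; i++) {
		uint8_t b = insn[i];

		if (long_mode && b >= 0x40 && b <= 0x4f) {
			st->rex = b;	/* REX: remember it and keep scanning */
			continue;
		}
		switch (b) {
		case 0x66: st->op_size_override = 1; break;
		case 0x67: st->addr_size_override = 1; break;
		case 0x26: case 0x2e: case 0x36: case 0x3e:
		case 0x64: case 0x65:		/* segment overrides */
		case 0xf0:			/* LOCK */
		case 0xf2: case 0xf3:		/* REPNE / REP */
			break;
		default:
			return i;		/* first non-prefix byte */
		}
		/* Any legacy prefix after a REX prefix cancels the REX. */
		st->rex = 0;
	}
	return i;
}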
5061 
 5062 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
 5063 {
5064  return ctxt->d & PageTable;
5065 }
5066 
5067 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
5068 {
 5069  /* The second termination condition applies only to REPE/REPZ
 5070  * and REPNE/REPNZ. If the repeat prefix is one of those, the
 5071  * matching condition is tested as well:
 5072  *
 5073  * - REPE/REPZ: done when ZF = 0
 5074  * - REPNE/REPNZ: done when ZF = 1
 5075  */
5076  if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
5077  (ctxt->b == 0xae) || (ctxt->b == 0xaf))
5078  && (((ctxt->rep_prefix == REPE_PREFIX) &&
5079  ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
5080  || ((ctxt->rep_prefix == REPNE_PREFIX) &&
5081  ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
5082  return true;
5083 
5084  return false;
5085 }
5086 
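Restated in isolation, with only the ZF bit of EFLAGS modeled:

#include <stdbool.h>
#include <stdint.h>

#define ZF (1u << 6)	/* EFLAGS.ZF */

/* Sketch: second REP termination test, applied only to CMPS/SCAS. */
static bool repe_done(uint32_t eflags)  { return !(eflags & ZF); }
static bool repne_done(uint32_t eflags) { return  (eflags & ZF); }
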
 5087 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 5088 {
5089  int rc;
5090 
5091  kvm_fpu_get();
5092  rc = asm_safe("fwait");
5093  kvm_fpu_put();
5094 
5095  if (unlikely(rc != X86EMUL_CONTINUE))
5096  return emulate_exception(ctxt, MF_VECTOR, 0, false);
5097 
5098  return X86EMUL_CONTINUE;
5099 }
5100 
5101 static void fetch_possible_mmx_operand(struct operand *op)
5102 {
5103  if (op->type == OP_MM)
5104  kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
5105 }
5106 
5107 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5108 {
5109  ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5110 
5111  if (!(ctxt->d & ByteOp))
5112  fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5113 
5114  asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5115  : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5116  [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5117  : "c"(ctxt->src2.val));
5118 
5119  ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5120  if (!fop) /* exception is returned in fop variable */
5121  return emulate_de(ctxt);
5122  return X86EMUL_CONTINUE;
5123 }
5124 
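fastop() round-trips only the arithmetic flags: the guest's OF/SF/ZF/AF/PF/CF (EFLAGS_MASK, 0x8d5) are loaded into the host EFLAGS around the CALL_NOSPEC thunk and merged back afterwards. A standalone sketch of that merge:

#include <stdint.h>

/* OF|SF|ZF|AF|PF|CF: the flags an ALU fastop may modify. */
#define ARITH_FLAGS_MASK 0x8d5u

/* Sketch: fold the flags produced by the host instruction back into
 * the guest's EFLAGS, touching only the arithmetic flags. */
static uint64_t merge_flags(uint64_t guest_eflags, uint64_t host_flags)
{
	return (guest_eflags & ~(uint64_t)ARITH_FLAGS_MASK) |
	       (host_flags & ARITH_FLAGS_MASK);
}
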
 5125 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
 5126 {
5127  /* Clear fields that are set conditionally but read without a guard. */
5128  ctxt->rip_relative = false;
5129  ctxt->rex_prefix = 0;
5130  ctxt->lock_prefix = 0;
5131  ctxt->rep_prefix = 0;
5132  ctxt->regs_valid = 0;
5133  ctxt->regs_dirty = 0;
5134 
5135  ctxt->io_read.pos = 0;
5136  ctxt->io_read.end = 0;
5137  ctxt->mem_read.end = 0;
5138 }
5139 
 5140 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 5141 {
5142  const struct x86_emulate_ops *ops = ctxt->ops;
5143  int rc = X86EMUL_CONTINUE;
5144  int saved_dst_type = ctxt->dst.type;
5145  bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
5146 
5147  ctxt->mem_read.pos = 0;
5148 
5149  /* LOCK prefix is allowed only with some instructions */
5150  if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5151  rc = emulate_ud(ctxt);
5152  goto done;
5153  }
5154 
5155  if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5156  rc = emulate_ud(ctxt);
5157  goto done;
5158  }
5159 
 5160  if (unlikely(ctxt->d &
 5161  (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
 5162  if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5163  (ctxt->d & Undefined)) {
5164  rc = emulate_ud(ctxt);
5165  goto done;
5166  }
5167 
5168  if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5169  || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5170  rc = emulate_ud(ctxt);
5171  goto done;
5172  }
5173 
5174  if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5175  rc = emulate_nm(ctxt);
5176  goto done;
5177  }
5178 
5179  if (ctxt->d & Mmx) {
5180  rc = flush_pending_x87_faults(ctxt);
5181  if (rc != X86EMUL_CONTINUE)
5182  goto done;
5183  /*
5184  * Now that we know the fpu is exception safe, we can fetch
5185  * operands from it.
5186  */
 5187  fetch_possible_mmx_operand(&ctxt->src);
 5188  fetch_possible_mmx_operand(&ctxt->src2);
 5189  if (!(ctxt->d & Mov))
 5190  fetch_possible_mmx_operand(&ctxt->dst);
 5191  }
5192 
5193  if (unlikely(is_guest_mode) && ctxt->intercept) {
 5194  rc = emulator_check_intercept(ctxt, ctxt->intercept,
 5195  X86_ICPT_PRE_EXCEPT);
 5196  if (rc != X86EMUL_CONTINUE)
5197  goto done;
5198  }
5199 
5200  /* Instruction can only be executed in protected mode */
5201  if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5202  rc = emulate_ud(ctxt);
5203  goto done;
5204  }
5205 
5206  /* Privileged instruction can be executed only in CPL=0 */
5207  if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5208  if (ctxt->d & PrivUD)
5209  rc = emulate_ud(ctxt);
5210  else
5211  rc = emulate_gp(ctxt, 0);
5212  goto done;
5213  }
5214 
5215  /* Do instruction specific permission checks */
5216  if (ctxt->d & CheckPerm) {
5217  rc = ctxt->check_perm(ctxt);
5218  if (rc != X86EMUL_CONTINUE)
5219  goto done;
5220  }
5221 
5222  if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
 5223  rc = emulator_check_intercept(ctxt, ctxt->intercept,
 5224  X86_ICPT_POST_EXCEPT);
 5225  if (rc != X86EMUL_CONTINUE)
5226  goto done;
5227  }
5228 
5229  if (ctxt->rep_prefix && (ctxt->d & String)) {
5230  /* All REP prefixes have the same first termination condition */
5231  if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5232  string_registers_quirk(ctxt);
5233  ctxt->eip = ctxt->_eip;
5234  ctxt->eflags &= ~X86_EFLAGS_RF;
5235  goto done;
5236  }
5237  }
5238  }
5239 
5240  if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5241  rc = segmented_read(ctxt, ctxt->src.addr.mem,
5242  ctxt->src.valptr, ctxt->src.bytes);
5243  if (rc != X86EMUL_CONTINUE)
5244  goto done;
5245  ctxt->src.orig_val64 = ctxt->src.val64;
5246  }
5247 
5248  if (ctxt->src2.type == OP_MEM) {
5249  rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5250  &ctxt->src2.val, ctxt->src2.bytes);
5251  if (rc != X86EMUL_CONTINUE)
5252  goto done;
5253  }
5254 
5255  if ((ctxt->d & DstMask) == ImplicitOps)
5256  goto special_insn;
5257 
5258 
5259  if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5260  /* optimisation - avoid slow emulated read if Mov */
5261  rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5262  &ctxt->dst.val, ctxt->dst.bytes);
5263  if (rc != X86EMUL_CONTINUE) {
5264  if (!(ctxt->d & NoWrite) &&
5265  rc == X86EMUL_PROPAGATE_FAULT &&
5266  ctxt->exception.vector == PF_VECTOR)
5267  ctxt->exception.error_code |= PFERR_WRITE_MASK;
5268  goto done;
5269  }
5270  }
5271  /* Copy full 64-bit value for CMPXCHG8B. */
5272  ctxt->dst.orig_val64 = ctxt->dst.val64;
5273 
5274 special_insn:
5275 
5276  if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
 5277  rc = emulator_check_intercept(ctxt, ctxt->intercept,
 5278  X86_ICPT_POST_MEMACCESS);
 5279  if (rc != X86EMUL_CONTINUE)
5280  goto done;
5281  }
5282 
5283  if (ctxt->rep_prefix && (ctxt->d & String))
5284  ctxt->eflags |= X86_EFLAGS_RF;
5285  else
5286  ctxt->eflags &= ~X86_EFLAGS_RF;
5287 
5288  if (ctxt->execute) {
5289  if (ctxt->d & Fastop)
5290  rc = fastop(ctxt, ctxt->fop);
5291  else
5292  rc = ctxt->execute(ctxt);
5293  if (rc != X86EMUL_CONTINUE)
5294  goto done;
5295  goto writeback;
5296  }
5297 
5298  if (ctxt->opcode_len == 2)
5299  goto twobyte_insn;
5300  else if (ctxt->opcode_len == 3)
5301  goto threebyte_insn;
5302 
5303  switch (ctxt->b) {
5304  case 0x70 ... 0x7f: /* jcc (short) */
5305  if (test_cc(ctxt->b, ctxt->eflags))
5306  rc = jmp_rel(ctxt, ctxt->src.val);
5307  break;
5308  case 0x8d: /* lea r16/r32, m */
5309  ctxt->dst.val = ctxt->src.addr.mem.ea;
5310  break;
5311  case 0x90 ... 0x97: /* nop / xchg reg, rax */
5312  if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5313  ctxt->dst.type = OP_NONE;
5314  else
5315  rc = em_xchg(ctxt);
5316  break;
5317  case 0x98: /* cbw/cwde/cdqe */
5318  switch (ctxt->op_bytes) {
5319  case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5320  case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5321  case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5322  }
5323  break;
5324  case 0xcc: /* int3 */
5325  rc = emulate_int(ctxt, 3);
5326  break;
5327  case 0xcd: /* int n */
5328  rc = emulate_int(ctxt, ctxt->src.val);
5329  break;
5330  case 0xce: /* into */
5331  if (ctxt->eflags & X86_EFLAGS_OF)
5332  rc = emulate_int(ctxt, 4);
5333  break;
5334  case 0xe9: /* jmp rel */
5335  case 0xeb: /* jmp rel short */
5336  rc = jmp_rel(ctxt, ctxt->src.val);
5337  ctxt->dst.type = OP_NONE; /* Disable writeback. */
5338  break;
5339  case 0xf4: /* hlt */
5340  ctxt->ops->halt(ctxt);
5341  break;
5342  case 0xf5: /* cmc */
5343  /* complement carry flag from eflags reg */
5344  ctxt->eflags ^= X86_EFLAGS_CF;
5345  break;
5346  case 0xf8: /* clc */
5347  ctxt->eflags &= ~X86_EFLAGS_CF;
5348  break;
5349  case 0xf9: /* stc */
5350  ctxt->eflags |= X86_EFLAGS_CF;
5351  break;
5352  case 0xfc: /* cld */
5353  ctxt->eflags &= ~X86_EFLAGS_DF;
5354  break;
5355  case 0xfd: /* std */
5356  ctxt->eflags |= X86_EFLAGS_DF;
5357  break;
5358  default:
5359  goto cannot_emulate;
5360  }
5361 
5362  if (rc != X86EMUL_CONTINUE)
5363  goto done;
5364 
5365 writeback:
5366  if (ctxt->d & SrcWrite) {
5367  BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5368  rc = writeback(ctxt, &ctxt->src);
5369  if (rc != X86EMUL_CONTINUE)
5370  goto done;
5371  }
5372  if (!(ctxt->d & NoWrite)) {
5373  rc = writeback(ctxt, &ctxt->dst);
5374  if (rc != X86EMUL_CONTINUE)
5375  goto done;
5376  }
5377 
5378  /*
 5379  * Restore dst type in case the decoding is reused
 5380  * (happens for string instructions).
5381  */
5382  ctxt->dst.type = saved_dst_type;
5383 
5384  if ((ctxt->d & SrcMask) == SrcSI)
5385  string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5386 
5387  if ((ctxt->d & DstMask) == DstDI)
5388  string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5389 
5390  if (ctxt->rep_prefix && (ctxt->d & String)) {
5391  unsigned int count;
5392  struct read_cache *r = &ctxt->io_read;
5393  if ((ctxt->d & SrcMask) == SrcSI)
5394  count = ctxt->src.count;
5395  else
5396  count = ctxt->dst.count;
5397  register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5398 
5399  if (!string_insn_completed(ctxt)) {
5400  /*
5401  * Re-enter guest when pio read ahead buffer is empty
 5402  * or, if it is not used, after every 1024 iterations.
5403  */
5404  if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5405  (r->end == 0 || r->end != r->pos)) {
5406  /*
5407  * Reset read cache. Usually happens before
 5408  * decode, but since the instruction is restarted
5409  * we have to do it here.
5410  */
5411  ctxt->mem_read.end = 0;
5412  writeback_registers(ctxt);
5413  return EMULATION_RESTART;
5414  }
5415  goto done; /* skip rip writeback */
5416  }
5417  ctxt->eflags &= ~X86_EFLAGS_RF;
5418  }
5419 
5420  ctxt->eip = ctxt->_eip;
5421  if (ctxt->mode != X86EMUL_MODE_PROT64)
5422  ctxt->eip = (u32)ctxt->_eip;
5423 
5424 done:
5425  if (rc == X86EMUL_PROPAGATE_FAULT) {
5426  if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5427  return EMULATION_FAILED;
5428  ctxt->have_exception = true;
5429  }
5430  if (rc == X86EMUL_INTERCEPTED)
5431  return EMULATION_INTERCEPTED;
5432 
5433  if (rc == X86EMUL_CONTINUE)
5434  writeback_registers(ctxt);
5435 
 5436  return (rc == X86EMUL_CONTINUE) ? EMULATION_OK : EMULATION_FAILED;
 5437 
5438 twobyte_insn:
5439  switch (ctxt->b) {
5440  case 0x09: /* wbinvd */
5441  (ctxt->ops->wbinvd)(ctxt);
5442  break;
5443  case 0x08: /* invd */
5444  case 0x0d: /* GrpP (prefetch) */
5445  case 0x18: /* Grp16 (prefetch/nop) */
5446  case 0x1f: /* nop */
5447  break;
5448  case 0x20: /* mov cr, reg */
5449  ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5450  break;
5451  case 0x21: /* mov from dr to reg */
5452  ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5453  break;
5454  case 0x40 ... 0x4f: /* cmov */
5455  if (test_cc(ctxt->b, ctxt->eflags))
5456  ctxt->dst.val = ctxt->src.val;
5457  else if (ctxt->op_bytes != 4)
5458  ctxt->dst.type = OP_NONE; /* no writeback */
5459  break;
5460  case 0x80 ... 0x8f: /* jnz rel, etc*/
5461  if (test_cc(ctxt->b, ctxt->eflags))
5462  rc = jmp_rel(ctxt, ctxt->src.val);
5463  break;
5464  case 0x90 ... 0x9f: /* setcc r/m8 */
5465  ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5466  break;
5467  case 0xb6 ... 0xb7: /* movzx */
5468  ctxt->dst.bytes = ctxt->op_bytes;
5469  ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5470  : (u16) ctxt->src.val;
5471  break;
5472  case 0xbe ... 0xbf: /* movsx */
5473  ctxt->dst.bytes = ctxt->op_bytes;
5474  ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5475  (s16) ctxt->src.val;
5476  break;
5477  default:
5478  goto cannot_emulate;
5479  }
5480 
5481 threebyte_insn:
5482 
5483  if (rc != X86EMUL_CONTINUE)
5484  goto done;
5485 
5486  goto writeback;
5487 
5488 cannot_emulate:
5489  return EMULATION_FAILED;
5490 }
5491 
 5492 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
 5493 {
5494  invalidate_registers(ctxt);
5495 }
5496 
 5497 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
 5498 {
5499  writeback_registers(ctxt);
5500 }
5501 
 5502 bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
 5503 {
5504  if (ctxt->rep_prefix && (ctxt->d & String))
5505  return false;
5506 
5507  if (ctxt->d & TwoMemOp)
5508  return false;
5509 
5510  return true;
5511 }
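
Restated as an isolated predicate: a cached guest physical address may be reused only for non-repeating, single-memory-operand instructions.

#include <stdbool.h>

struct insn_props {
	bool rep_string;	/* REP-prefixed string instruction */
	bool two_mem_ops;	/* e.g. MOVS: two memory operands */
};

/* Sketch: mirror of the predicate above, in isolation. */
static bool can_reuse_cached_gpa(const struct insn_props *p)
{
	return !p->rep_string && !p->two_mem_ops;
}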
Definition: emulate.c:4179
static noinline int fxregs_fixup(struct fxregs_state *fx_state, const size_t used_size)
Definition: emulate.c:3782
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5067
#define Stack
Definition: emulate.c:123
#define F(_f, _e)
Definition: emulate.c:4001
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5492
#define OpDI
Definition: emulate.c:43
#define OpMem32
Definition: emulate.c:52
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, unsigned size)
Definition: emulate.c:936
static const struct escape escape_db
Definition: emulate.c:4220
#define Src2ImmByte
Definition: emulate.c:153
static const struct opcode opcode_map_0f_38[256]
Definition: emulate.c:4519
static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2189
#define II(_f, _e, _i)
Definition: emulate.c:4002
static void assign_register(unsigned long *reg, u64 val, int bytes)
Definition: emulate.c:492
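assign_register() above implements the architectural GPR writeback rule: 1- and 2-byte stores merge into the low bits and preserve the rest, a 4-byte store zero-extends to the full 64-bit register, and an 8-byte store replaces it outright. A self-contained sketch of the same rule (gpr_assign is an illustrative name):
#include <stdint.h>
#include <assert.h>

/* x86 GPR writeback: 1/2-byte stores merge, 4-byte stores zero-extend. */
static void gpr_assign(uint64_t *reg, uint64_t val, int bytes)
{
	switch (bytes) {
	case 1: *reg = (*reg & ~0xffull)   | (val & 0xff);   break;
	case 2: *reg = (*reg & ~0xffffull) | (val & 0xffff); break;
	case 4: *reg = (uint32_t)val; break;	/* clears bits 63:32 */
	case 8: *reg = val; break;
	}
}

int main(void)
{
	uint64_t r = 0xdeadbeefcafebabeull;
	gpr_assign(&r, 0x12, 1);
	assert(r == 0xdeadbeefcafeba12ull);	/* upper bytes preserved */
	gpr_assign(&r, 0x1234, 4);
	assert(r == 0x1234ull);			/* upper half zeroed */
	return 0;
}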
#define OpImmU
Definition: emulate.c:53
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3184
#define FOP_END
Definition: emulate.c:321
#define Src2Shift
Definition: emulate.c:149
#define I(_f, _e)
Definition: emulate.c:4000
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:516
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg)
Definition: emulate.c:1757
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5125
static bool valid_cr(int nr)
Definition: emulate.c:3849
#define OpMem64
Definition: emulate.c:44
#define SrcImmU16
Definition: emulate.c:114
#define DIP(_y, _i, _p)
Definition: emulate.c:3991
#define EmulateOnUD
Definition: emulate.c:139
#define Undefined
Definition: emulate.c:142
#define Src2DS
Definition: emulate.c:159
static int em_das(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3050
#define OpCS
Definition: emulate.c:59
#define OpDX
Definition: emulate.c:46
#define TwoMemOp
Definition: emulate.c:179
#define SrcImm
Definition: emulate.c:104
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:1105
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, ulong *desc_addr_p)
Definition: emulate.c:1525
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss)
Definition: emulate.c:2800
#define EXT(_f, _e)
Definition: emulate.c:3994
#define No16
Definition: emulate.c:177
#define AlignMask
Definition: emulate.c:164
static int em_ret(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2229
static const struct opcode group8[]
Definition: emulate.c:4134
#define SrcImmUByte
Definition: emulate.c:107
static int em_rsm(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2318
static const struct opcode group5[]
Definition: emulate.c:4098
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Definition: emulate.c:2020
static int em_cwd(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3217
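em_cwd() above (CWD/CDQ/CQO) fills rDX with the sign of rAX using a branch-free identity: for an n-bit source, ~((val >> (n-1)) - 1) is all-ones exactly when the sign bit is set and zero otherwise. A worked sketch under that assumption (sign_fill is illustrative; on writeback the result is truncated to the operand size):
#include <stdint.h>
#include <assert.h>

/* Sign-broadcast: all-ones if bit (bytes*8 - 1) of val is set, else zero. */
static uint64_t sign_fill(uint64_t val, int bytes)
{
	int n = bytes * 8;
	return ~(((val >> (n - 1)) & 1) - 1);
}

int main(void)
{
	assert(sign_fill(0x8000, 2) == ~0ull);	/* CWD: DX slice = 0xffff */
	assert(sign_fill(0x7fff, 2) == 0);	/* positive: DX = 0 */
	return 0;
}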
static const struct gprefix three_byte_0f_38_f0
Definition: emulate.c:4507
#define Fastop
Definition: emulate.c:169
static int em_popa(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:1999
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage)
Definition: emulate.c:466
#define FASTOP2W(op)
Definition: emulate.c:379
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2511
static const struct instr_dual instr_dual_0f_c3
Definition: emulate.c:4262
static int em_popf(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:1904
#define Avx
Definition: emulate.c:167
#define FASTOP2R(op, name)
Definition: emulate.c:397
static const struct opcode group4[]
Definition: emulate.c:4092
#define Src2CL
Definition: emulate.c:152
#define Priv
Definition: emulate.c:144
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
Definition: emulate.c:561
#define Mov
Definition: emulate.c:136
static size_t __fxstate_size(int nregs)
Definition: emulate.c:3720
#define F2bv(_f, _e)
Definition: emulate.c:4012
#define SrcShift
Definition: emulate.c:98
#define SrcMem32
Definition: emulate.c:103
#define SrcNone
Definition: emulate.c:99
#define OpSI
Definition: emulate.c:54
#define OpAcc
Definition: emulate.c:42
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3577
static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
Definition: emulate.c:3375
static const struct escape escape_dd
Definition: emulate.c:4241
#define DstDX
Definition: emulate.c:94
static int check_cr_access(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3861
static const struct opcode group3[]
Definition: emulate.c:4081
static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size)
Definition: emulate.c:1381
#define GroupDual
Definition: emulate.c:126
static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear, void *data, unsigned size)
Definition: emulate.c:847
static int em_movsxd(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3692
#define OpMemFAddr
Definition: emulate.c:56
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
Definition: emulate.c:598
#define DstMem
Definition: emulate.c:88
#define SrcMask
Definition: emulate.c:119
static int segmented_read_std(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size)
Definition: emulate.c:860
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3624
#define NotImpl
Definition: emulate.c:147
#define SrcMem
Definition: emulate.c:101
static int emulate_de(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:618
#define asm_safe(insn, inoutclob...)
Definition: emulate.c:454
#define X7(x...)
Definition: emulate.c:189
static int em_aam(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3088
static u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:648
static int em_bswap(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3665
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Definition: emulate.c:2069
#define ModeDual
Definition: emulate.c:131
#define FASTOP3WCL(op)
Definition: emulate.c:411
static int em_ltr(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3430
int emulator_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code)
Definition: emulate.c:3020
#define PageTable
Definition: emulate.c:146
static const struct gprefix pfx_0f_2b
Definition: emulate.c:4183
static const struct gprefix pfx_0f_ae_7
Definition: emulate.c:4163
#define OpXLat
Definition: emulate.c:66
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
Definition: emulate.c:892
static size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3725
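fxstate_size() above sizes the FXSAVE image the emulator must copy: a fixed header plus the x87 area, then 16 bytes per exposed XMM register (8 registers outside long mode, 16 in it). A sketch of the arithmetic, assuming the standard 160-byte offset of the XMM area in the FXSAVE layout (toy_fxstate_size is an illustrative name):
#include <stddef.h>
#include <stdio.h>

#define FX_XMM_OFFSET 160	/* header + x87 st_space in the FXSAVE image */

/* Bytes of the FXSAVE area that are meaningful in the current mode. */
static size_t toy_fxstate_size(int long_mode)
{
	int nregs = long_mode ? 16 : 8;
	return FX_XMM_OFFSET + nregs * 16;
}

int main(void)
{
	printf("%zu %zu\n", toy_fxstate_size(0), toy_fxstate_size(1));	/* 288 416 */
	return 0;
}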
#define OpSS
Definition: emulate.c:60
#define RMExt
Definition: emulate.c:128
static int em_call(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3128
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3795
#define String
Definition: emulate.c:122
#define NoWrite
Definition: emulate.c:170
#define Src2One
Definition: emulate.c:154
static int task_switch_16(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc)
Definition: emulate.c:2743
static int em_lseg(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2302
static const struct gprefix pfx_0f_6f_0f_7f
Definition: emulate.c:4175
static void decode_register_operand(struct x86_emulate_ctxt *ctxt, struct operand *op)
Definition: emulate.c:1137
static int task_switch_32(struct x86_emulate_ctxt *ctxt, u16 old_tss_sel, ulong old_tss_base, struct desc_struct *new_desc)
Definition: emulate.c:2881
#define SrcDX
Definition: emulate.c:116
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:1941
static unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:511
#define VMWARE_PORT_VMPORT
Definition: emulate.c:2582
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, enum x86emul_mode mode, ulong *linear, unsigned int flags)
Definition: emulate.c:687
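__linearize() above is the central address-translation helper: add the segment base to the effective address, truncate to the address width, and, outside 64-bit mode, fault if the access overruns the segment limit (64-bit mode instead checks canonicality). A much-reduced model under those assumptions (toy_linearize and the flat expand-up limit check are illustrative; the real helper also handles alignment, unusable and expand-down segments, and write permission):
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Reduced model: base + offset, with a flat limit check in legacy modes. */
static bool toy_linearize(uint64_t seg_base, uint32_t seg_limit,
			  uint64_t ea, unsigned size, bool long_mode,
			  uint64_t *linear)
{
	if (!long_mode) {
		ea = (uint32_t)ea;			/* 16/32-bit address width */
		if (ea > seg_limit || size - 1 > seg_limit - ea)
			return false;			/* #GP/#SS in the real code */
	}
	*linear = seg_base + ea;			/* 64-bit: canonical check instead */
	return true;
}

int main(void)
{
	uint64_t la;
	/* 0: a 4-byte access at limit-1 overruns the segment */
	printf("%d\n", toy_linearize(0x1000, 0xffff, 0xfffe, 4, false, &la));
	return 0;
}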
static const struct instr_dual instr_dual_0f_38_f1
Definition: emulate.c:4503
#define DstXacc
Definition: emulate.c:182
#define insn_fetch_arr(_arr, _size, _ctxt)
Definition: emulate.c:960
static int em_smsw(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3545
#define DstDI
Definition: emulate.c:90
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, const void *orig_data, const void *data, unsigned size)
Definition: emulate.c:1410
static const struct opcode group7_rm0[]
Definition: emulate.c:4020
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2095
static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op)
Definition: emulate.c:1176
static int read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *dest, unsigned size)
Definition: emulate.c:1356
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2241
static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt, u16 selector, ulong *desc_addr_p)
Definition: emulate.c:1496
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2571
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5497
#define Prefix
Definition: emulate.c:127
#define ID(_f, _i)
Definition: emulate.c:3997
#define EFLG_RESERVED_ZEROS_MASK
Definition: emulate.c:237
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:255
static bool emul_is_noncanonical_address(u64 la, struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:653
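emul_is_noncanonical_address() above enforces the 64-bit rule that bits 63 down to N-1 must all equal bit N-1, where N is the virtual-address width from ctxt_virt_addr_bits() (48 or 57). Equivalently, sign-extending from N bits must be a no-op; a sketch (is_noncanonical here is a standalone illustration that relies on arithmetic right shift of signed values, as the kernel does):
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* An address is canonical iff sign-extending from the va width is a no-op. */
static bool is_noncanonical(uint64_t la, unsigned va_bits)
{
	return (uint64_t)((int64_t)(la << (64 - va_bits)) >> (64 - va_bits)) != la;
}

int main(void)
{
	assert(!is_noncanonical(0x00007fffffffffffull, 48));	/* top of lower half */
	assert(!is_noncanonical(0xffff800000000000ull, 48));	/* bottom of upper half */
	assert(is_noncanonical(0x0000800000000000ull, 48));	/* in the hole */
	return 0;
}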
#define G(_f, _g)
Definition: emulate.c:3995
static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2194
static int em_clflush(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3680
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3557
#define DstImmUByte
Definition: emulate.c:93
#define ByteOp
Definition: emulate.c:83
static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3686
static const struct opcode opcode_table[256]
Definition: emulate.c:4274
#define OpImm64
Definition: emulate.c:65
static int emulate_pop(struct x86_emulate_ctxt *ctxt, void *dest, int len)
Definition: emulate.c:1841
#define DstMask
Definition: emulate.c:96
#define Src2Imm
Definition: emulate.c:155
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3971
#define Intercept
Definition: emulate.c:173
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5062
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, enum x86_transfer_type transfer, struct desc_struct *desc)
Definition: emulate.c:1552
static const struct gprefix pfx_0f_e7
Definition: emulate.c:4195
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
Definition: emulate.c:4763
static int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
Definition: emulate.c:842
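jmp_rel() above applies a near branch: add the sign-extended displacement to the instruction pointer, then truncate the result to the effective operand size. A sketch of the truncation rule (jmp_rel_ip is illustrative; the real path goes through assign_eip_near(), which also validates canonicality in 64-bit mode):
#include <stdint.h>
#include <assert.h>

/* new IP = (old IP + rel), truncated to the operand size. */
static uint64_t jmp_rel_ip(uint64_t eip, int64_t rel, int op_bytes)
{
	uint64_t ip = eip + rel;
	if (op_bytes == 2)
		return (uint16_t)ip;
	if (op_bytes == 4)
		return (uint32_t)ip;
	return ip;	/* 64-bit: full width, canonical check elsewhere */
}

int main(void)
{
	assert(jmp_rel_ip(0xfffe, 4, 2) == 0x0002);	/* 16-bit IP wraps */
	return 0;
}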
#define Group
Definition: emulate.c:125
static int em_in(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3587
#define FASTOP2(op)
Definition: emulate.c:370
#define Escape
Definition: emulate.c:129
static int em_sidt(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3506
static const struct opcode group1[]
Definition: emulate.c:4055
#define EFLAGS_MASK
Definition: emulate.c:265
#define Src2ES
Definition: emulate.c:156
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5502
static const struct opcode group7_rm1[]
Definition: emulate.c:4026
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
Definition: emulate.c:1823
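push() above is the primitive behind em_push() and friends: decrement the stack pointer by the operand size under stack_mask() (so a 16- or 32-bit stack wraps correctly), then write the value at SS:rSP. A simplified model (struct toy_stack and toy_push are illustrative; the real code writes through segmented_write() rather than a flat buffer):
#include <stdint.h>
#include <string.h>
#include <assert.h>

struct toy_stack {
	uint8_t  mem[64];
	uint64_t rsp;
	uint64_t mask;	/* 0xffff, 0xffffffff, or ~0ull per SS.B / long mode */
};

/* Decrement rSP under the stack mask, then store the value at SS:rSP. */
static void toy_push(struct toy_stack *s, const void *data, int bytes)
{
	s->rsp = (s->rsp & ~s->mask) | ((s->rsp - bytes) & s->mask);
	memcpy(&s->mem[s->rsp & s->mask], data, bytes);
}

int main(void)
{
	struct toy_stack s = { .rsp = 16, .mask = 0xffff };
	uint32_t v = 0x12345678;
	toy_push(&s, &v, 4);
	assert(s.rsp == 12);	/* moved down by the operand size */
	return 0;
}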
#define DstMem16
Definition: emulate.c:92
static void setup_syscalls_segments(struct desc_struct *cs, struct desc_struct *ss)
Definition: emulate.c:2330
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
Definition: emulate.c:603
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3947
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3501
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3211
#define Unaligned
Definition: emulate.c:166
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3323
#define GroupMask
Definition: emulate.c:124
static void kvm_fpu_get(void)
Definition: fpu.h:98
static void kvm_read_sse_reg(int reg, sse128_t *data)
Definition: fpu.h:112
static void kvm_write_mmx_reg(int reg, const u64 *data)
Definition: fpu.h:133
static void kvm_write_sse_reg(int reg, const sse128_t *data)
Definition: fpu.h:119
static void kvm_fpu_put(void)
Definition: fpu.h:107
static void kvm_read_mmx_reg(int reg, u64 *data)
Definition: fpu.h:126
static bool is_guest_mode(struct kvm_vcpu *vcpu)
#define X86EMUL_F_INVLPG
Definition: kvm_emulate.h:95
#define X86EMUL_PROPAGATE_FAULT
Definition: kvm_emulate.h:85
#define EMULATION_INTERCEPTED
Definition: kvm_emulate.h:508
#define NR_EMULATOR_GPRS
Definition: kvm_emulate.h:304
x86_intercept
Definition: kvm_emulate.h:442
@ x86_intercept_none
Definition: kvm_emulate.h:443
static bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:428
void(* fastop_t)(struct fastop *)
Definition: kvm_emulate.h:293
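fastop routines are tiny assembly stubs laid out at a fixed stride, one per operand size, with operands and EFLAGS passed in fixed registers; the dispatcher offsets the entry point by the operand size before calling through fastop_t. A portable stand-in for that size-indexed dispatch (the toy add_tbl replaces the kernel's asm stubs and carries no flags; __builtin_ctz maps 1/2/4/8 bytes to index 0..3):
#include <stdint.h>
#include <stdio.h>

/* Portable stand-in: one handler per operand size, selected at dispatch. */
typedef uint64_t (*toyop_t)(uint64_t dst, uint64_t src);

static uint64_t add8(uint64_t d, uint64_t s)  { return (uint8_t)(d + s); }
static uint64_t add16(uint64_t d, uint64_t s) { return (uint16_t)(d + s); }
static uint64_t add32(uint64_t d, uint64_t s) { return (uint32_t)(d + s); }
static uint64_t add64(uint64_t d, uint64_t s) { return d + s; }

/* Analogue of the fixed-stride stub table, indexed by log2(op size). */
static const toyop_t add_tbl[4] = { add8, add16, add32, add64 };

int main(void)
{
	int bytes = 4;
	/* 32-bit add wraps to 0, as the hardware result would */
	printf("%llx\n",
	       (unsigned long long)add_tbl[__builtin_ctz(bytes)](0xffffffffull, 1));
	return 0;
}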
#define REPE_PREFIX
Definition: kvm_emulate.h:387
#define X86EMUL_F_WRITE
Definition: kvm_emulate.h:92
#define X86EMUL_UNHANDLEABLE
Definition: kvm_emulate.h:83
#define X86EMUL_CONTINUE
Definition: kvm_emulate.h:81
static bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:411
static ulong * reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:531
#define X86EMUL_F_FETCH
Definition: kvm_emulate.h:93
#define EMULATION_RESTART
Definition: kvm_emulate.h:507
#define EMULATION_OK
Definition: kvm_emulate.h:506
#define X86EMUL_INTERCEPTED
Definition: kvm_emulate.h:89
#define KVM_EMULATOR_BUG_ON(cond, ctxt)
Definition: kvm_emulate.h:377
x86emul_mode
Definition: kvm_emulate.h:279
@ X86EMUL_MODE_PROT64
Definition: kvm_emulate.h:284
@ X86EMUL_MODE_VM86
Definition: kvm_emulate.h:281
@ X86EMUL_MODE_REAL
Definition: kvm_emulate.h:280
@ X86EMUL_MODE_PROT32
Definition: kvm_emulate.h:283
@ X86EMUL_MODE_PROT16
Definition: kvm_emulate.h:282
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:519
static bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:418
static ulong * reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:544
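reg_read()/reg_write()/reg_rmw() above form a lazy GPR cache: a register is pulled from the vCPU only on first use (tracked in a regs_valid bitmap) and flagged for writeback once modified (regs_dirty), so untouched registers are never fetched or flushed. A hedged sketch of that bitmap discipline (struct toy_ctxt and the backend callback are illustrative):
#include <stdint.h>
#include <stdio.h>

#define NGPRS 16

struct toy_ctxt {
	uint64_t regs[NGPRS];
	uint32_t valid, dirty;			/* one bit per GPR */
	uint64_t (*read_gpr)(unsigned nr);	/* backend callback */
};

static uint64_t toy_reg_read(struct toy_ctxt *c, unsigned nr)
{
	if (!(c->valid & (1u << nr))) {		/* fill on first use */
		c->regs[nr] = c->read_gpr(nr);
		c->valid |= 1u << nr;
	}
	return c->regs[nr];
}

static uint64_t *toy_reg_rmw(struct toy_ctxt *c, unsigned nr)
{
	toy_reg_read(c, nr);			/* ensure cached */
	c->dirty |= 1u << nr;			/* schedule writeback */
	return &c->regs[nr];
}

static uint64_t backend(unsigned nr) { return nr * 100; }

int main(void)
{
	struct toy_ctxt c = { .read_gpr = backend };
	*toy_reg_rmw(&c, 3) += 1;
	printf("%llu dirty=%x\n", (unsigned long long)c.regs[3], c.dirty);	/* 301 dirty=8 */
	return 0;
}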
#define EMULATION_FAILED
Definition: kvm_emulate.h:505
#define X86EMUL_IO_NEEDED
Definition: kvm_emulate.h:88
#define REPNE_PREFIX
Definition: kvm_emulate.h:388
x86_intercept_stage
Definition: kvm_emulate.h:435
@ X86_ICPT_POST_MEMACCESS
Definition: kvm_emulate.h:439
@ X86_ICPT_PRE_EXCEPT
Definition: kvm_emulate.h:437
@ X86_ICPT_POST_EXCEPT
Definition: kvm_emulate.h:438
bool is_vmware_backdoor_pmc(u32 pmc_idx)
Definition: pmu.c:534
struct opcode high[64]
Definition: emulate.c:224
struct opcode op[8]
Definition: emulate.c:223
u8 data[15]
Definition: kvm_emulate.h:267
struct opcode pfx_no
Definition: emulate.c:216
struct opcode pfx_66
Definition: emulate.c:217
struct opcode pfx_f2
Definition: emulate.c:218
struct opcode pfx_f3
Definition: emulate.c:219
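The four pfx_* members above let one opcode byte fan out by its mandatory SSE prefix: none, 0x66, 0xF2, or 0xF3. A sketch of the selection step (toy_gprefix mirrors the shape of struct gprefix; the selection logic and the movups/movss example rows are paraphrased, not the kernel's decode path):
#include <stdio.h>

struct toy_op { const char *name; };

struct toy_gprefix {
	struct toy_op pfx_no, pfx_66, pfx_f2, pfx_f3;
};

/* Pick the variant for the decoded mandatory prefix byte (0 = none). */
static const struct toy_op *pick(const struct toy_gprefix *g, unsigned char pfx)
{
	switch (pfx) {
	case 0x66: return &g->pfx_66;
	case 0xf2: return &g->pfx_f2;
	case 0xf3: return &g->pfx_f3;
	default:   return &g->pfx_no;
	}
}

int main(void)
{
	struct toy_gprefix mov = {
		{ "movups" }, { "movupd" }, { "movsd" }, { "movss" },
	};
	printf("%s\n", pick(&mov, 0xf3)->name);	/* movss */
	return 0;
}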
struct opcode mod012[8]
Definition: emulate.c:211
struct opcode mod3[8]
Definition: emulate.c:212
struct opcode mod3
Definition: emulate.c:229
struct opcode mod012
Definition: emulate.c:228
struct opcode mode32
Definition: emulate.c:233
struct opcode mode64
Definition: emulate.c:234
u8 pad[7]
Definition: emulate.c:196
union opcode::@44 u
const struct opcode * group
Definition: emulate.c:199
int(* execute)(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:198
const struct mode_dual * mdual
Definition: emulate.c:204
int(* check_perm)(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:207
u64 flags
Definition: emulate.c:194
const struct escape * esc
Definition: emulate.c:202
const struct group_dual * gdual
Definition: emulate.c:200
const struct instr_dual * idual
Definition: emulate.c:203
const struct gprefix * gprefix
Definition: emulate.c:201
u8 intercept
Definition: emulate.c:195
void(* fastop)(struct fastop *fake)
Definition: emulate.c:205
unsigned int bytes
Definition: kvm_emulate.h:241
unsigned long * reg
Definition: kvm_emulate.h:248
unsigned mm
Definition: kvm_emulate.h:254
enum operand::@0 type
unsigned long orig_val
Definition: kvm_emulate.h:244
char valptr[sizeof(sse128_t)]
Definition: kvm_emulate.h:259
void * data
Definition: kvm_emulate.h:262
unsigned int count
Definition: kvm_emulate.h:242
unsigned long val
Definition: kvm_emulate.h:257
struct operand::@4::segmented_address mem
union operand::@4 addr
u64 mm_val
Definition: kvm_emulate.h:261
sse128_t vec_val
Definition: kvm_emulate.h:260
u64 val64
Definition: kvm_emulate.h:258
u64 orig_val64
Definition: kvm_emulate.h:245
unsigned xmm
Definition: kvm_emulate.h:253
unsigned long end
Definition: kvm_emulate.h:275
unsigned long pos
Definition: kvm_emulate.h:274
u8 data[1024]
Definition: kvm_emulate.h:273
u16 dx
Definition: tss.h:47
u16 ip
Definition: tss.h:43
u16 ss
Definition: tss.h:55
u16 sp
Definition: tss.h:49
u16 bx
Definition: tss.h:48
u16 ax
Definition: tss.h:45
u16 ds
Definition: tss.h:56
u16 si
Definition: tss.h:51
u16 cs
Definition: tss.h:54
u16 prev_task_link
Definition: tss.h:36
u16 cx
Definition: tss.h:46
u16 bp
Definition: tss.h:50
u16 es
Definition: tss.h:53
u16 flag
Definition: tss.h:44
u16 di
Definition: tss.h:52
u16 ldt
Definition: tss.h:57
u32 ds
Definition: tss.h:27
u32 gs
Definition: tss.h:29
u32 fs
Definition: tss.h:28
u32 esp
Definition: tss.h:20
u32 ebx
Definition: tss.h:19
u32 eflags
Definition: tss.h:15
u32 ss
Definition: tss.h:26
u32 esi
Definition: tss.h:22
u32 ecx
Definition: tss.h:17
u32 eip
Definition: tss.h:14
u32 eax
Definition: tss.h:16
u32 prev_task_link
Definition: tss.h:6
u32 cs
Definition: tss.h:25
u32 edx
Definition: tss.h:18
u32 ldt_selector
Definition: tss.h:30
u32 ebp
Definition: tss.h:21
u32 cr3
Definition: tss.h:13
u32 edi
Definition: tss.h:23
u32 es
Definition: tss.h:24
struct operand * memopp
Definition: kvm_emulate.h:370
int(* check_perm)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:344
struct fetch_cache fetch
Definition: kvm_emulate.h:371
unsigned long eflags
Definition: kvm_emulate.h:312
enum x86emul_mode mode
Definition: kvm_emulate.h:315
struct read_cache io_read
Definition: kvm_emulate.h:372
const struct x86_emulate_ops * ops
Definition: kvm_emulate.h:309
struct x86_exception exception
Definition: kvm_emulate.h:324
struct operand src2
Definition: kvm_emulate.h:366
unsigned long _eip
Definition: kvm_emulate.h:362
struct operand src
Definition: kvm_emulate.h:365
struct operand dst
Definition: kvm_emulate.h:367
struct read_cache mem_read
Definition: kvm_emulate.h:373
unsigned long eip
Definition: kvm_emulate.h:313
struct operand memop
Definition: kvm_emulate.h:368
unsigned long _regs[NR_EMULATOR_GPRS]
Definition: kvm_emulate.h:369
int(* execute)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:341
void(* halt)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:213
int(* set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value)
Definition: kvm_emulate.h:207
bool(* guest_has_rdpid)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:224
ulong(* get_cr)(struct x86_emulate_ctxt *ctxt, int cr)
Definition: kvm_emulate.h:203
int(* cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *fault)
Definition: kvm_emulate.h:177
void(* get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest)
Definition: kvm_emulate.h:206
int(* cpl)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:205
unsigned long(* get_cached_segment_base)(struct x86_emulate_ctxt *ctxt, int seg)
Definition: kvm_emulate.h:197
int(* leave_smm)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:230
void(* triple_fault)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:231
bool(* is_smm)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:228
void(* set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked)
Definition: kvm_emulate.h:226
int(* pio_out_emulated)(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count)
Definition: kvm_emulate.h:189
int(* get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata)
Definition: kvm_emulate.h:210
void(* write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
Definition: kvm_emulate.h:111
int(* pio_in_emulated)(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count)
Definition: kvm_emulate.h:185
int(* write_emulated)(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *fault)
Definition: kvm_emulate.h:164
int(* set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
Definition: kvm_emulate.h:232
int(* get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata)
Definition: kvm_emulate.h:209
bool(* guest_has_movbe)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:222
void(* get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
Definition: kvm_emulate.h:200
bool(* is_guest_mode)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:229
void(* set_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
Definition: kvm_emulate.h:201
void(* set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg)
Definition: kvm_emulate.h:195
bool(* get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg)
Definition: kvm_emulate.h:193
void(* set_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
Definition: kvm_emulate.h:202
gva_t(* get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr, unsigned int flags)
Definition: kvm_emulate.h:234
int(* fix_hypercall)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:215
int(* intercept)(struct x86_emulate_ctxt *ctxt, struct x86_instruction_info *info, enum x86_intercept_stage stage)
Definition: kvm_emulate.h:216
int(* read_emulated)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *fault)
Definition: kvm_emulate.h:153
int(* fetch)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *fault)
Definition: kvm_emulate.h:143
int(* read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata)
Definition: kvm_emulate.h:212
void(* wbinvd)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:214
int(* read_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *fault, bool system)
Definition: kvm_emulate.h:120
int(* write_std)(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *fault, bool system)
Definition: kvm_emulate.h:133
bool(* get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only)
Definition: kvm_emulate.h:220
int(* check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc)
Definition: kvm_emulate.h:211
int(* set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data)
Definition: kvm_emulate.h:208
int(* set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
Definition: kvm_emulate.h:204
bool(* guest_has_fxsr)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:223
void(* invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr)
Definition: kvm_emulate.h:183
void(* get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
Definition: kvm_emulate.h:199
bool error_code_valid
Definition: kvm_emulate.h:24
static __always_inline void vmsave(unsigned long pa)
Definition: svm_ops.h:59
static void clgi(void)
Definition: svm_ops.h:39
static void stgi(void)
Definition: svm_ops.h:44
static void invlpga(unsigned long addr, u32 asid)
Definition: svm_ops.h:49
bool __read_mostly enable_vmware_backdoor
Definition: x86.c:176
uint32_t flags
Definition: xen.c:1