KVM
Classes | Macros | Typedefs | Enumerations | Functions
kvm_emulate.h File Reference
#include <asm/desc_defs.h>
#include "fpu.h"
Include dependency graph for kvm_emulate.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

struct  x86_exception
 
struct  x86_instruction_info
 
struct  x86_emulate_ops
 
struct  operand
 
struct  fetch_cache
 
struct  read_cache
 
struct  x86_emulate_ctxt
 

Macros

#define X86EMUL_CONTINUE   0
 
#define X86EMUL_UNHANDLEABLE   1
 
#define X86EMUL_PROPAGATE_FAULT   2 /* propagate a generated fault to guest */
 
#define X86EMUL_RETRY_INSTR   3 /* retry the instruction for some reason */
 
#define X86EMUL_CMPXCHG_FAILED   4 /* cmpxchg did not see expected value */
 
#define X86EMUL_IO_NEEDED   5 /* IO is needed to complete emulation */
 
#define X86EMUL_INTERCEPTED   6 /* Intercepted by nested VMCB/VMCS */
 
#define X86EMUL_F_WRITE   BIT(0)
 
#define X86EMUL_F_FETCH   BIT(1)
 
#define X86EMUL_F_IMPLICIT   BIT(2)
 
#define X86EMUL_F_INVLPG   BIT(3)
 
#define NR_EMULATOR_GPRS   8
 
#define KVM_EMULATOR_BUG_ON(cond, ctxt)
 
#define REPE_PREFIX   0xf3
 
#define REPNE_PREFIX   0xf2
 
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx   0x68747541
 
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx   0x444d4163
 
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx   0x69746e65
 
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx   0x69444d41
 
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx   0x21726574
 
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx   0x74656273
 
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx   0x6f677948
 
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx   0x656e6975
 
#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx   0x6e65476e
 
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx   0x756e6547
 
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx   0x6c65746e
 
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx   0x49656e69
 
#define X86EMUL_CPUID_VENDOR_CentaurHauls_ebx   0x746e6543
 
#define X86EMUL_CPUID_VENDOR_CentaurHauls_ecx   0x736c7561
 
#define X86EMUL_CPUID_VENDOR_CentaurHauls_edx   0x48727561
 
#define EMULATION_FAILED   -1
 
#define EMULATION_OK   0
 
#define EMULATION_RESTART   1
 
#define EMULATION_INTERCEPTED   2
 

Typedefs

typedef void(* fastop_t) (struct fastop *)
 

Enumerations

enum  x86emul_mode {
  X86EMUL_MODE_REAL , X86EMUL_MODE_VM86 , X86EMUL_MODE_PROT16 , X86EMUL_MODE_PROT32 ,
  X86EMUL_MODE_PROT64
}
 
enum  x86_intercept_stage { X86_ICTP_NONE = 0 , X86_ICPT_PRE_EXCEPT , X86_ICPT_POST_EXCEPT , X86_ICPT_POST_MEMACCESS }
 
enum  x86_intercept {
  x86_intercept_none , x86_intercept_cr_read , x86_intercept_cr_write , x86_intercept_clts ,
  x86_intercept_lmsw , x86_intercept_smsw , x86_intercept_dr_read , x86_intercept_dr_write ,
  x86_intercept_lidt , x86_intercept_sidt , x86_intercept_lgdt , x86_intercept_sgdt ,
  x86_intercept_lldt , x86_intercept_sldt , x86_intercept_ltr , x86_intercept_str ,
  x86_intercept_rdtsc , x86_intercept_rdpmc , x86_intercept_pushf , x86_intercept_popf ,
  x86_intercept_cpuid , x86_intercept_rsm , x86_intercept_iret , x86_intercept_intn ,
  x86_intercept_invd , x86_intercept_pause , x86_intercept_hlt , x86_intercept_invlpg ,
  x86_intercept_invlpga , x86_intercept_vmrun , x86_intercept_vmload , x86_intercept_vmsave ,
  x86_intercept_vmmcall , x86_intercept_stgi , x86_intercept_clgi , x86_intercept_skinit ,
  x86_intercept_rdtscp , x86_intercept_rdpid , x86_intercept_icebp , x86_intercept_wbinvd ,
  x86_intercept_monitor , x86_intercept_mwait , x86_intercept_rdmsr , x86_intercept_wrmsr ,
  x86_intercept_in , x86_intercept_ins , x86_intercept_out , x86_intercept_outs ,
  x86_intercept_xsetbv , nr_x86_intercepts
}
 

Functions

static bool is_guest_vendor_intel (u32 ebx, u32 ecx, u32 edx)
 
static bool is_guest_vendor_amd (u32 ebx, u32 ecx, u32 edx)
 
static bool is_guest_vendor_hygon (u32 ebx, u32 ecx, u32 edx)
 
int x86_decode_insn (struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
 
bool x86_page_table_writing_insn (struct x86_emulate_ctxt *ctxt)
 
void init_decode_cache (struct x86_emulate_ctxt *ctxt)
 
int x86_emulate_insn (struct x86_emulate_ctxt *ctxt)
 
int emulator_task_switch (struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code)
 
int emulate_int_real (struct x86_emulate_ctxt *ctxt, int irq)
 
void emulator_invalidate_register_cache (struct x86_emulate_ctxt *ctxt)
 
void emulator_writeback_register_cache (struct x86_emulate_ctxt *ctxt)
 
bool emulator_can_use_gpa (struct x86_emulate_ctxt *ctxt)
 
static ulong reg_read (struct x86_emulate_ctxt *ctxt, unsigned nr)
 
static ulong * reg_write (struct x86_emulate_ctxt *ctxt, unsigned nr)
 
static ulong * reg_rmw (struct x86_emulate_ctxt *ctxt, unsigned nr)
 

Macro Definition Documentation

◆ EMULATION_FAILED

#define EMULATION_FAILED   -1

Definition at line 505 of file kvm_emulate.h.

◆ EMULATION_INTERCEPTED

#define EMULATION_INTERCEPTED   2

Definition at line 508 of file kvm_emulate.h.

◆ EMULATION_OK

#define EMULATION_OK   0

Definition at line 506 of file kvm_emulate.h.

◆ EMULATION_RESTART

#define EMULATION_RESTART   1

Definition at line 507 of file kvm_emulate.h.

◆ KVM_EMULATOR_BUG_ON

#define KVM_EMULATOR_BUG_ON (   cond,
  ctxt 
)
Value:
({ \
int __ret = (cond); \
\
if (WARN_ON_ONCE(__ret)) \
ctxt->ops->vm_bugged(ctxt); \
unlikely(__ret); \
})

Definition at line 377 of file kvm_emulate.h.

◆ NR_EMULATOR_GPRS

#define NR_EMULATOR_GPRS   8

Definition at line 304 of file kvm_emulate.h.

◆ REPE_PREFIX

#define REPE_PREFIX   0xf3

Definition at line 387 of file kvm_emulate.h.

◆ REPNE_PREFIX

#define REPNE_PREFIX   0xf2

Definition at line 388 of file kvm_emulate.h.

◆ X86EMUL_CMPXCHG_FAILED

#define X86EMUL_CMPXCHG_FAILED   4 /* cmpxchg did not see expected value */

Definition at line 87 of file kvm_emulate.h.

◆ X86EMUL_CONTINUE

#define X86EMUL_CONTINUE   0

Definition at line 81 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx   0x69444d41

Definition at line 395 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx   0x21726574

Definition at line 396 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AMDisbetterI_edx

#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx   0x74656273

Definition at line 397 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx

#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx   0x68747541

Definition at line 391 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx

#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx   0x444d4163

Definition at line 392 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_AuthenticAMD_edx

#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx   0x69746e65

Definition at line 393 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_CentaurHauls_ebx

#define X86EMUL_CPUID_VENDOR_CentaurHauls_ebx   0x746e6543

Definition at line 407 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_CentaurHauls_ecx

#define X86EMUL_CPUID_VENDOR_CentaurHauls_ecx   0x736c7561

Definition at line 408 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_CentaurHauls_edx

#define X86EMUL_CPUID_VENDOR_CentaurHauls_edx   0x48727561

Definition at line 409 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_GenuineIntel_ebx

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx   0x756e6547

Definition at line 403 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_GenuineIntel_ecx

#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx   0x6c65746e

Definition at line 404 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_GenuineIntel_edx

#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx   0x49656e69

Definition at line 405 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_HygonGenuine_ebx

#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx   0x6f677948

Definition at line 399 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_HygonGenuine_ecx

#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx   0x656e6975

Definition at line 400 of file kvm_emulate.h.

◆ X86EMUL_CPUID_VENDOR_HygonGenuine_edx

#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx   0x6e65476e

Definition at line 401 of file kvm_emulate.h.

◆ X86EMUL_F_FETCH

#define X86EMUL_F_FETCH   BIT(1)

Definition at line 93 of file kvm_emulate.h.

◆ X86EMUL_F_IMPLICIT

#define X86EMUL_F_IMPLICIT   BIT(2)

Definition at line 94 of file kvm_emulate.h.

◆ X86EMUL_F_INVLPG

#define X86EMUL_F_INVLPG   BIT(3)

Definition at line 95 of file kvm_emulate.h.

◆ X86EMUL_F_WRITE

#define X86EMUL_F_WRITE   BIT(0)

Definition at line 92 of file kvm_emulate.h.

◆ X86EMUL_INTERCEPTED

#define X86EMUL_INTERCEPTED   6 /* Intercepted by nested VMCB/VMCS */

Definition at line 89 of file kvm_emulate.h.

◆ X86EMUL_IO_NEEDED

#define X86EMUL_IO_NEEDED   5 /* IO is needed to complete emulation */

Definition at line 88 of file kvm_emulate.h.

◆ X86EMUL_PROPAGATE_FAULT

#define X86EMUL_PROPAGATE_FAULT   2 /* propagate a generated fault to guest */

Definition at line 85 of file kvm_emulate.h.

◆ X86EMUL_RETRY_INSTR

#define X86EMUL_RETRY_INSTR   3 /* retry the instruction for some reason */

Definition at line 86 of file kvm_emulate.h.

◆ X86EMUL_UNHANDLEABLE

#define X86EMUL_UNHANDLEABLE   1

Definition at line 83 of file kvm_emulate.h.

Typedef Documentation

◆ fastop_t

typedef void(* fastop_t) (struct fastop *)

Definition at line 293 of file kvm_emulate.h.

Enumeration Type Documentation

◆ x86_intercept

Enumerator
x86_intercept_none 
x86_intercept_cr_read 
x86_intercept_cr_write 
x86_intercept_clts 
x86_intercept_lmsw 
x86_intercept_smsw 
x86_intercept_dr_read 
x86_intercept_dr_write 
x86_intercept_lidt 
x86_intercept_sidt 
x86_intercept_lgdt 
x86_intercept_sgdt 
x86_intercept_lldt 
x86_intercept_sldt 
x86_intercept_ltr 
x86_intercept_str 
x86_intercept_rdtsc 
x86_intercept_rdpmc 
x86_intercept_pushf 
x86_intercept_popf 
x86_intercept_cpuid 
x86_intercept_rsm 
x86_intercept_iret 
x86_intercept_intn 
x86_intercept_invd 
x86_intercept_pause 
x86_intercept_hlt 
x86_intercept_invlpg 
x86_intercept_invlpga 
x86_intercept_vmrun 
x86_intercept_vmload 
x86_intercept_vmsave 
x86_intercept_vmmcall 
x86_intercept_stgi 
x86_intercept_clgi 
x86_intercept_skinit 
x86_intercept_rdtscp 
x86_intercept_rdpid 
x86_intercept_icebp 
x86_intercept_wbinvd 
x86_intercept_monitor 
x86_intercept_mwait 
x86_intercept_rdmsr 
x86_intercept_wrmsr 
x86_intercept_in 
x86_intercept_ins 
x86_intercept_out 
x86_intercept_outs 
x86_intercept_xsetbv 
nr_x86_intercepts 

Definition at line 442 of file kvm_emulate.h.

442  {
492 
494 };
@ nr_x86_intercepts
Definition: kvm_emulate.h:493
@ x86_intercept_monitor
Definition: kvm_emulate.h:483
@ x86_intercept_intn
Definition: kvm_emulate.h:466
@ x86_intercept_ins
Definition: kvm_emulate.h:488
@ x86_intercept_clts
Definition: kvm_emulate.h:446
@ x86_intercept_vmsave
Definition: kvm_emulate.h:474
@ x86_intercept_clgi
Definition: kvm_emulate.h:477
@ x86_intercept_wrmsr
Definition: kvm_emulate.h:486
@ x86_intercept_iret
Definition: kvm_emulate.h:465
@ x86_intercept_sldt
Definition: kvm_emulate.h:456
@ x86_intercept_cpuid
Definition: kvm_emulate.h:463
@ x86_intercept_pause
Definition: kvm_emulate.h:468
@ x86_intercept_rdmsr
Definition: kvm_emulate.h:485
@ x86_intercept_pushf
Definition: kvm_emulate.h:461
@ x86_intercept_rsm
Definition: kvm_emulate.h:464
@ x86_intercept_vmrun
Definition: kvm_emulate.h:472
@ x86_intercept_out
Definition: kvm_emulate.h:489
@ x86_intercept_rdpid
Definition: kvm_emulate.h:480
@ x86_intercept_stgi
Definition: kvm_emulate.h:476
@ x86_intercept_icebp
Definition: kvm_emulate.h:481
@ x86_intercept_hlt
Definition: kvm_emulate.h:469
@ x86_intercept_vmload
Definition: kvm_emulate.h:473
@ x86_intercept_lgdt
Definition: kvm_emulate.h:453
@ x86_intercept_vmmcall
Definition: kvm_emulate.h:475
@ x86_intercept_cr_write
Definition: kvm_emulate.h:445
@ x86_intercept_rdtscp
Definition: kvm_emulate.h:479
@ x86_intercept_smsw
Definition: kvm_emulate.h:448
@ x86_intercept_dr_write
Definition: kvm_emulate.h:450
@ x86_intercept_mwait
Definition: kvm_emulate.h:484
@ x86_intercept_popf
Definition: kvm_emulate.h:462
@ x86_intercept_xsetbv
Definition: kvm_emulate.h:491
@ x86_intercept_skinit
Definition: kvm_emulate.h:478
@ x86_intercept_outs
Definition: kvm_emulate.h:490
@ x86_intercept_invlpg
Definition: kvm_emulate.h:470
@ x86_intercept_wbinvd
Definition: kvm_emulate.h:482
@ x86_intercept_invlpga
Definition: kvm_emulate.h:471
@ x86_intercept_dr_read
Definition: kvm_emulate.h:449
@ x86_intercept_sidt
Definition: kvm_emulate.h:452
@ x86_intercept_in
Definition: kvm_emulate.h:487
@ x86_intercept_cr_read
Definition: kvm_emulate.h:444
@ x86_intercept_rdtsc
Definition: kvm_emulate.h:459
@ x86_intercept_lldt
Definition: kvm_emulate.h:455
@ x86_intercept_lidt
Definition: kvm_emulate.h:451
@ x86_intercept_sgdt
Definition: kvm_emulate.h:454
@ x86_intercept_ltr
Definition: kvm_emulate.h:457
@ x86_intercept_str
Definition: kvm_emulate.h:458
@ x86_intercept_lmsw
Definition: kvm_emulate.h:447
@ x86_intercept_rdpmc
Definition: kvm_emulate.h:460
@ x86_intercept_invd
Definition: kvm_emulate.h:467
@ x86_intercept_none
Definition: kvm_emulate.h:443

◆ x86_intercept_stage

Enumerator
X86_ICTP_NONE 
X86_ICPT_PRE_EXCEPT 
X86_ICPT_POST_EXCEPT 
X86_ICPT_POST_MEMACCESS 

Definition at line 435 of file kvm_emulate.h.

435  {
436  X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
440 };
@ X86_ICTP_NONE
Definition: kvm_emulate.h:436
@ X86_ICPT_POST_MEMACCESS
Definition: kvm_emulate.h:439
@ X86_ICPT_PRE_EXCEPT
Definition: kvm_emulate.h:437
@ X86_ICPT_POST_EXCEPT
Definition: kvm_emulate.h:438

◆ x86emul_mode

Enumerator
X86EMUL_MODE_REAL 
X86EMUL_MODE_VM86 
X86EMUL_MODE_PROT16 
X86EMUL_MODE_PROT32 
X86EMUL_MODE_PROT64 

Definition at line 279 of file kvm_emulate.h.

279  {
280  X86EMUL_MODE_REAL, /* Real mode. */
281  X86EMUL_MODE_VM86, /* Virtual 8086 mode. */
282  X86EMUL_MODE_PROT16, /* 16-bit protected mode. */
283  X86EMUL_MODE_PROT32, /* 32-bit protected mode. */
284  X86EMUL_MODE_PROT64, /* 64-bit (long) mode. */
285 };
@ X86EMUL_MODE_PROT64
Definition: kvm_emulate.h:284
@ X86EMUL_MODE_VM86
Definition: kvm_emulate.h:281
@ X86EMUL_MODE_REAL
Definition: kvm_emulate.h:280
@ X86EMUL_MODE_PROT32
Definition: kvm_emulate.h:283
@ X86EMUL_MODE_PROT16
Definition: kvm_emulate.h:282

Function Documentation

◆ emulate_int_real()

int emulate_int_real ( struct x86_emulate_ctxt *ctxt,
int  irq 
)

Definition at line 2069 of file emulate.c.

2070 {
2071  int rc;
2072 
2073  invalidate_registers(ctxt);
2074  rc = __emulate_int_real(ctxt, irq);
2075  if (rc == X86EMUL_CONTINUE)
2076  writeback_registers(ctxt);
2077  return rc;
2078 }
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:246
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
Definition: emulate.c:2020
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:255
#define X86EMUL_CONTINUE
Definition: kvm_emulate.h:81
Here is the call graph for this function:
Here is the caller graph for this function:

◆ emulator_can_use_gpa()

bool emulator_can_use_gpa ( struct x86_emulate_ctxt *ctxt)

Definition at line 5502 of file emulate.c.

5503 {
5504  if (ctxt->rep_prefix && (ctxt->d & String))
5505  return false;
5506 
5507  if (ctxt->d & TwoMemOp)
5508  return false;
5509 
5510  return true;
5511 }
#define TwoMemOp
Definition: emulate.c:179
#define String
Definition: emulate.c:122
Here is the caller graph for this function:

◆ emulator_invalidate_register_cache()

void emulator_invalidate_register_cache ( struct x86_emulate_ctxt *ctxt)

Definition at line 5492 of file emulate.c.

5493 {
5494  invalidate_registers(ctxt);
5495 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ emulator_task_switch()

int emulator_task_switch ( struct x86_emulate_ctxt *ctxt,
u16  tss_selector,
int  idt_index,
int  reason,
bool  has_error_code,
u32  error_code 
)

Definition at line 3020 of file emulate.c.

3023 {
3024  int rc;
3025 
3026  invalidate_registers(ctxt);
3027  ctxt->_eip = ctxt->eip;
3028  ctxt->dst.type = OP_NONE;
3029 
3030  rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3031  has_error_code, error_code);
3032 
3033  if (rc == X86EMUL_CONTINUE) {
3034  ctxt->eip = ctxt->_eip;
3035  writeback_registers(ctxt);
3036  }
3037 
3039 }
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code)
Definition: emulate.c:2919
#define X86EMUL_UNHANDLEABLE
Definition: kvm_emulate.h:83
#define EMULATION_OK
Definition: kvm_emulate.h:506
#define EMULATION_FAILED
Definition: kvm_emulate.h:505
enum operand::@0 type
unsigned long _eip
Definition: kvm_emulate.h:362
struct operand dst
Definition: kvm_emulate.h:367
unsigned long eip
Definition: kvm_emulate.h:313
Here is the call graph for this function:
Here is the caller graph for this function:

◆ emulator_writeback_register_cache()

void emulator_writeback_register_cache ( struct x86_emulate_ctxt *ctxt)

Definition at line 5497 of file emulate.c.

5498 {
5499  writeback_registers(ctxt);
5500 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ init_decode_cache()

void init_decode_cache ( struct x86_emulate_ctxt *ctxt)

Definition at line 5125 of file emulate.c.

5126 {
5127  /* Clear fields that are set conditionally but read without a guard. */
5128  ctxt->rip_relative = false;
5129  ctxt->rex_prefix = 0;
5130  ctxt->lock_prefix = 0;
5131  ctxt->rep_prefix = 0;
5132  ctxt->regs_valid = 0;
5133  ctxt->regs_dirty = 0;
5134 
5135  ctxt->io_read.pos = 0;
5136  ctxt->io_read.end = 0;
5137  ctxt->mem_read.end = 0;
5138 }
unsigned long end
Definition: kvm_emulate.h:275
unsigned long pos
Definition: kvm_emulate.h:274
struct read_cache io_read
Definition: kvm_emulate.h:372
struct read_cache mem_read
Definition: kvm_emulate.h:373
Here is the caller graph for this function:

◆ is_guest_vendor_amd()

static bool is_guest_vendor_amd ( u32  ebx,
u32  ecx,
u32  edx 
)
inlinestatic

Definition at line 418 of file kvm_emulate.h.

419 {
420  return (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
426 }
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx
Definition: kvm_emulate.h:396
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx
Definition: kvm_emulate.h:391
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx
Definition: kvm_emulate.h:392
#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx
Definition: kvm_emulate.h:393
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx
Definition: kvm_emulate.h:395
#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx
Definition: kvm_emulate.h:397
Here is the caller graph for this function:

◆ is_guest_vendor_hygon()

static bool is_guest_vendor_hygon ( u32  ebx,
u32  ecx,
u32  edx 
)
inlinestatic

Definition at line 428 of file kvm_emulate.h.

429 {
433 }
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx
Definition: kvm_emulate.h:399
#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx
Definition: kvm_emulate.h:401
#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx
Definition: kvm_emulate.h:400
Here is the caller graph for this function:

◆ is_guest_vendor_intel()

static bool is_guest_vendor_intel ( u32  ebx,
u32  ecx,
u32  edx 
)
inlinestatic

Definition at line 411 of file kvm_emulate.h.

412 {
416 }
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
Definition: kvm_emulate.h:404
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx
Definition: kvm_emulate.h:405
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
Definition: kvm_emulate.h:403
Here is the caller graph for this function:

◆ reg_read()

static ulong reg_read ( struct x86_emulate_ctxt *ctxt,
unsigned  nr 
)
inlinestatic

Definition at line 519 of file kvm_emulate.h.

520 {
521  if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt))
522  nr &= NR_EMULATOR_GPRS - 1;
523 
524  if (!(ctxt->regs_valid & (1 << nr))) {
525  ctxt->regs_valid |= 1 << nr;
526  ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
527  }
528  return ctxt->_regs[nr];
529 }
#define NR_EMULATOR_GPRS
Definition: kvm_emulate.h:304
#define KVM_EMULATOR_BUG_ON(cond, ctxt)
Definition: kvm_emulate.h:377
const struct x86_emulate_ops * ops
Definition: kvm_emulate.h:309
unsigned long _regs[NR_EMULATOR_GPRS]
Definition: kvm_emulate.h:369
ulong(* read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg)
Definition: kvm_emulate.h:104
Here is the caller graph for this function:

◆ reg_rmw()

static ulong* reg_rmw ( struct x86_emulate_ctxt *ctxt,
unsigned  nr 
)
inlinestatic

Definition at line 544 of file kvm_emulate.h.

545 {
546  reg_read(ctxt, nr);
547  return reg_write(ctxt, nr);
548 }
static ulong * reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:531
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:519
Here is the call graph for this function:
Here is the caller graph for this function:

◆ reg_write()

static ulong* reg_write ( struct x86_emulate_ctxt *ctxt,
unsigned  nr 
)
inlinestatic

Definition at line 531 of file kvm_emulate.h.

532 {
533  if (KVM_EMULATOR_BUG_ON(nr >= NR_EMULATOR_GPRS, ctxt))
534  nr &= NR_EMULATOR_GPRS - 1;
535 
536  BUILD_BUG_ON(sizeof(ctxt->regs_dirty) * BITS_PER_BYTE < NR_EMULATOR_GPRS);
537  BUILD_BUG_ON(sizeof(ctxt->regs_valid) * BITS_PER_BYTE < NR_EMULATOR_GPRS);
538 
539  ctxt->regs_valid |= 1 << nr;
540  ctxt->regs_dirty |= 1 << nr;
541  return &ctxt->_regs[nr];
542 }
Here is the caller graph for this function:

◆ x86_decode_insn()

int x86_decode_insn ( struct x86_emulate_ctxt *ctxt,
void *  insn,
int  insn_len,
int  emulation_type 
)

Definition at line 4763 of file emulate.c.

4764 {
4765  int rc = X86EMUL_CONTINUE;
4766  int mode = ctxt->mode;
4767  int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4768  bool op_prefix = false;
4769  bool has_seg_override = false;
4770  struct opcode opcode;
4771  u16 dummy;
4772  struct desc_struct desc;
4773 
4774  ctxt->memop.type = OP_NONE;
4775  ctxt->memopp = NULL;
4776  ctxt->_eip = ctxt->eip;
4777  ctxt->fetch.ptr = ctxt->fetch.data;
4778  ctxt->fetch.end = ctxt->fetch.data + insn_len;
4779  ctxt->opcode_len = 1;
4780  ctxt->intercept = x86_intercept_none;
4781  if (insn_len > 0)
4782  memcpy(ctxt->fetch.data, insn, insn_len);
4783  else {
4784  rc = __do_insn_fetch_bytes(ctxt, 1);
4785  if (rc != X86EMUL_CONTINUE)
4786  goto done;
4787  }
4788 
4789  switch (mode) {
4790  case X86EMUL_MODE_REAL:
4791  case X86EMUL_MODE_VM86:
4792  def_op_bytes = def_ad_bytes = 2;
4793  ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4794  if (desc.d)
4795  def_op_bytes = def_ad_bytes = 4;
4796  break;
4797  case X86EMUL_MODE_PROT16:
4798  def_op_bytes = def_ad_bytes = 2;
4799  break;
4800  case X86EMUL_MODE_PROT32:
4801  def_op_bytes = def_ad_bytes = 4;
4802  break;
4803 #ifdef CONFIG_X86_64
4804  case X86EMUL_MODE_PROT64:
4805  def_op_bytes = 4;
4806  def_ad_bytes = 8;
4807  break;
4808 #endif
4809  default:
4810  return EMULATION_FAILED;
4811  }
4812 
4813  ctxt->op_bytes = def_op_bytes;
4814  ctxt->ad_bytes = def_ad_bytes;
4815 
4816  /* Legacy prefixes. */
4817  for (;;) {
4818  switch (ctxt->b = insn_fetch(u8, ctxt)) {
4819  case 0x66: /* operand-size override */
4820  op_prefix = true;
4821  /* switch between 2/4 bytes */
4822  ctxt->op_bytes = def_op_bytes ^ 6;
4823  break;
4824  case 0x67: /* address-size override */
4825  if (mode == X86EMUL_MODE_PROT64)
4826  /* switch between 4/8 bytes */
4827  ctxt->ad_bytes = def_ad_bytes ^ 12;
4828  else
4829  /* switch between 2/4 bytes */
4830  ctxt->ad_bytes = def_ad_bytes ^ 6;
4831  break;
4832  case 0x26: /* ES override */
4833  has_seg_override = true;
4834  ctxt->seg_override = VCPU_SREG_ES;
4835  break;
4836  case 0x2e: /* CS override */
4837  has_seg_override = true;
4838  ctxt->seg_override = VCPU_SREG_CS;
4839  break;
4840  case 0x36: /* SS override */
4841  has_seg_override = true;
4842  ctxt->seg_override = VCPU_SREG_SS;
4843  break;
4844  case 0x3e: /* DS override */
4845  has_seg_override = true;
4846  ctxt->seg_override = VCPU_SREG_DS;
4847  break;
4848  case 0x64: /* FS override */
4849  has_seg_override = true;
4850  ctxt->seg_override = VCPU_SREG_FS;
4851  break;
4852  case 0x65: /* GS override */
4853  has_seg_override = true;
4854  ctxt->seg_override = VCPU_SREG_GS;
4855  break;
4856  case 0x40 ... 0x4f: /* REX */
4857  if (mode != X86EMUL_MODE_PROT64)
4858  goto done_prefixes;
4859  ctxt->rex_prefix = ctxt->b;
4860  continue;
4861  case 0xf0: /* LOCK */
4862  ctxt->lock_prefix = 1;
4863  break;
4864  case 0xf2: /* REPNE/REPNZ */
4865  case 0xf3: /* REP/REPE/REPZ */
4866  ctxt->rep_prefix = ctxt->b;
4867  break;
4868  default:
4869  goto done_prefixes;
4870  }
4871 
4872  /* Any legacy prefix after a REX prefix nullifies its effect. */
4873 
4874  ctxt->rex_prefix = 0;
4875  }
4876 
4877 done_prefixes:
4878 
4879  /* REX prefix. */
4880  if (ctxt->rex_prefix & 8)
4881  ctxt->op_bytes = 8; /* REX.W */
4882 
4883  /* Opcode byte(s). */
4884  opcode = opcode_table[ctxt->b];
4885  /* Two-byte opcode? */
4886  if (ctxt->b == 0x0f) {
4887  ctxt->opcode_len = 2;
4888  ctxt->b = insn_fetch(u8, ctxt);
4889  opcode = twobyte_table[ctxt->b];
4890 
4891  /* 0F_38 opcode map */
4892  if (ctxt->b == 0x38) {
4893  ctxt->opcode_len = 3;
4894  ctxt->b = insn_fetch(u8, ctxt);
4895  opcode = opcode_map_0f_38[ctxt->b];
4896  }
4897  }
4898  ctxt->d = opcode.flags;
4899 
4900  if (ctxt->d & ModRM)
4901  ctxt->modrm = insn_fetch(u8, ctxt);
4902 
4903  /* vex-prefix instructions are not implemented */
4904  if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4905  (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
4906  ctxt->d = NotImpl;
4907  }
4908 
4909  while (ctxt->d & GroupMask) {
4910  switch (ctxt->d & GroupMask) {
4911  case Group:
4912  goffset = (ctxt->modrm >> 3) & 7;
4913  opcode = opcode.u.group[goffset];
4914  break;
4915  case GroupDual:
4916  goffset = (ctxt->modrm >> 3) & 7;
4917  if ((ctxt->modrm >> 6) == 3)
4918  opcode = opcode.u.gdual->mod3[goffset];
4919  else
4920  opcode = opcode.u.gdual->mod012[goffset];
4921  break;
4922  case RMExt:
4923  goffset = ctxt->modrm & 7;
4924  opcode = opcode.u.group[goffset];
4925  break;
4926  case Prefix:
4927  if (ctxt->rep_prefix && op_prefix)
4928  return EMULATION_FAILED;
4929  simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4930  switch (simd_prefix) {
4931  case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4932  case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4933  case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4934  case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4935  }
4936  break;
4937  case Escape:
4938  if (ctxt->modrm > 0xbf) {
4939  size_t size = ARRAY_SIZE(opcode.u.esc->high);
4940  u32 index = array_index_nospec(
4941  ctxt->modrm - 0xc0, size);
4942 
4943  opcode = opcode.u.esc->high[index];
4944  } else {
4945  opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4946  }
4947  break;
4948  case InstrDual:
4949  if ((ctxt->modrm >> 6) == 3)
4950  opcode = opcode.u.idual->mod3;
4951  else
4952  opcode = opcode.u.idual->mod012;
4953  break;
4954  case ModeDual:
4955  if (ctxt->mode == X86EMUL_MODE_PROT64)
4956  opcode = opcode.u.mdual->mode64;
4957  else
4958  opcode = opcode.u.mdual->mode32;
4959  break;
4960  default:
4961  return EMULATION_FAILED;
4962  }
4963 
4964  ctxt->d &= ~(u64)GroupMask;
4965  ctxt->d |= opcode.flags;
4966  }
4967 
4968  ctxt->is_branch = opcode.flags & IsBranch;
4969 
4970  /* Unrecognised? */
4971  if (ctxt->d == 0)
4972  return EMULATION_FAILED;
4973 
4974  ctxt->execute = opcode.u.execute;
4975 
4976  if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
4977  likely(!(ctxt->d & EmulateOnUD)))
4978  return EMULATION_FAILED;
4979 
4980  if (unlikely(ctxt->d &
4982  No16))) {
4983  /*
4984  * These are copied unconditionally here, and checked unconditionally
4985  * in x86_emulate_insn.
4986  */
4987  ctxt->check_perm = opcode.check_perm;
4988  ctxt->intercept = opcode.intercept;
4989 
4990  if (ctxt->d & NotImpl)
4991  return EMULATION_FAILED;
4992 
4993  if (mode == X86EMUL_MODE_PROT64) {
4994  if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
4995  ctxt->op_bytes = 8;
4996  else if (ctxt->d & NearBranch)
4997  ctxt->op_bytes = 8;
4998  }
4999 
5000  if (ctxt->d & Op3264) {
5001  if (mode == X86EMUL_MODE_PROT64)
5002  ctxt->op_bytes = 8;
5003  else
5004  ctxt->op_bytes = 4;
5005  }
5006 
5007  if ((ctxt->d & No16) && ctxt->op_bytes == 2)
5008  ctxt->op_bytes = 4;
5009 
5010  if (ctxt->d & Sse)
5011  ctxt->op_bytes = 16;
5012  else if (ctxt->d & Mmx)
5013  ctxt->op_bytes = 8;
5014  }
5015 
5016  /* ModRM and SIB bytes. */
5017  if (ctxt->d & ModRM) {
5018  rc = decode_modrm(ctxt, &ctxt->memop);
5019  if (!has_seg_override) {
5020  has_seg_override = true;
5021  ctxt->seg_override = ctxt->modrm_seg;
5022  }
5023  } else if (ctxt->d & MemAbs)
5024  rc = decode_abs(ctxt, &ctxt->memop);
5025  if (rc != X86EMUL_CONTINUE)
5026  goto done;
5027 
5028  if (!has_seg_override)
5029  ctxt->seg_override = VCPU_SREG_DS;
5030 
5031  ctxt->memop.addr.mem.seg = ctxt->seg_override;
5032 
5033  /*
5034  * Decode and fetch the source operand: register, memory
5035  * or immediate.
5036  */
5037  rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
5038  if (rc != X86EMUL_CONTINUE)
5039  goto done;
5040 
5041  /*
5042  * Decode and fetch the second source operand: register, memory
5043  * or immediate.
5044  */
5045  rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
5046  if (rc != X86EMUL_CONTINUE)
5047  goto done;
5048 
5049  /* Decode and fetch the destination operand: register or memory. */
5050  rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
5051 
5052  if (ctxt->rip_relative && likely(ctxt->memopp))
5053  ctxt->memopp->addr.mem.ea = address_mask(ctxt,
5054  ctxt->memopp->addr.mem.ea + ctxt->_eip);
5055 
5056 done:
5057  if (rc == X86EMUL_PROPAGATE_FAULT)
5058  ctxt->have_exception = true;
5059  return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
5060 }
#define DstShift
Definition: emulate.c:85
static unsigned long address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
Definition: emulate.c:534
#define Op3264
Definition: emulate.c:141
static int decode_abs(struct x86_emulate_ctxt *ctxt, struct operand *op)
Definition: emulate.c:1313
#define insn_fetch(_type, _ctxt)
Definition: emulate.c:948
#define OpMask
Definition: emulate.c:71
#define Sse
Definition: emulate.c:132
#define InstrDual
Definition: emulate.c:130
#define Mmx
Definition: emulate.c:163
#define NearBranch
Definition: emulate.c:176
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, unsigned d)
Definition: emulate.c:4597
#define ModRM
Definition: emulate.c:134
#define CheckPerm
Definition: emulate.c:174
#define IsBranch
Definition: emulate.c:180
static const struct opcode twobyte_table[256]
Definition: emulate.c:4401
#define MemAbs
Definition: emulate.c:121
#define Stack
Definition: emulate.c:123
static const struct opcode opcode_map_0f_38[256]
Definition: emulate.c:4519
#define Src2Shift
Definition: emulate.c:149
#define EmulateOnUD
Definition: emulate.c:139
#define No16
Definition: emulate.c:177
#define SrcShift
Definition: emulate.c:98
#define GroupDual
Definition: emulate.c:126
#define NotImpl
Definition: emulate.c:147
#define ModeDual
Definition: emulate.c:131
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
Definition: emulate.c:892
#define RMExt
Definition: emulate.c:128
static int decode_modrm(struct x86_emulate_ctxt *ctxt, struct operand *op)
Definition: emulate.c:1176
#define Prefix
Definition: emulate.c:127
static const struct opcode opcode_table[256]
Definition: emulate.c:4274
#define Intercept
Definition: emulate.c:173
#define Group
Definition: emulate.c:125
#define Escape
Definition: emulate.c:129
#define GroupMask
Definition: emulate.c:124
#define X86EMUL_PROPAGATE_FAULT
Definition: kvm_emulate.h:85
struct opcode high[64]
Definition: emulate.c:224
struct opcode op[8]
Definition: emulate.c:223
u8 data[15]
Definition: kvm_emulate.h:267
struct opcode pfx_no
Definition: emulate.c:216
struct opcode pfx_66
Definition: emulate.c:217
struct opcode pfx_f2
Definition: emulate.c:218
struct opcode pfx_f3
Definition: emulate.c:219
struct opcode mod012[8]
Definition: emulate.c:211
struct opcode mod3[8]
Definition: emulate.c:212
struct opcode mod3
Definition: emulate.c:229
struct opcode mod012
Definition: emulate.c:228
struct opcode mode32
Definition: emulate.c:233
struct opcode mode64
Definition: emulate.c:234
union opcode::@44 u
const struct opcode * group
Definition: emulate.c:199
int(* execute)(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:198
const struct mode_dual * mdual
Definition: emulate.c:204
int(* check_perm)(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:207
u64 flags
Definition: emulate.c:194
const struct escape * esc
Definition: emulate.c:202
const struct group_dual * gdual
Definition: emulate.c:200
const struct instr_dual * idual
Definition: emulate.c:203
const struct gprefix * gprefix
Definition: emulate.c:201
u8 intercept
Definition: emulate.c:195
struct operand::@4::segmented_address mem
union operand::@4 addr
struct operand * memopp
Definition: kvm_emulate.h:370
int(* check_perm)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:344
struct fetch_cache fetch
Definition: kvm_emulate.h:371
enum x86emul_mode mode
Definition: kvm_emulate.h:315
struct operand src2
Definition: kvm_emulate.h:366
struct operand src
Definition: kvm_emulate.h:365
struct operand memop
Definition: kvm_emulate.h:368
int(* execute)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:341
bool(* get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector, struct desc_struct *desc, u32 *base3, int seg)
Definition: kvm_emulate.h:193
Here is the call graph for this function:
Here is the caller graph for this function:

◆ x86_emulate_insn()

int x86_emulate_insn ( struct x86_emulate_ctxt * ctxt)

Definition at line 5140 of file emulate.c.

5141 {
5142  const struct x86_emulate_ops *ops = ctxt->ops;
5143  int rc = X86EMUL_CONTINUE;
5144  int saved_dst_type = ctxt->dst.type;
5145  bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
5146 
5147  ctxt->mem_read.pos = 0;
5148 
5149  /* LOCK prefix is allowed only with some instructions */
5150  if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
5151  rc = emulate_ud(ctxt);
5152  goto done;
5153  }
5154 
5155  if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
5156  rc = emulate_ud(ctxt);
5157  goto done;
5158  }
5159 
5160  if (unlikely(ctxt->d &
5162  if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
5163  (ctxt->d & Undefined)) {
5164  rc = emulate_ud(ctxt);
5165  goto done;
5166  }
5167 
5168  if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
5169  || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
5170  rc = emulate_ud(ctxt);
5171  goto done;
5172  }
5173 
5174  if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
5175  rc = emulate_nm(ctxt);
5176  goto done;
5177  }
5178 
5179  if (ctxt->d & Mmx) {
5180  rc = flush_pending_x87_faults(ctxt);
5181  if (rc != X86EMUL_CONTINUE)
5182  goto done;
5183  /*
5184  * Now that we know the fpu is exception safe, we can fetch
5185  * operands from it.
5186  */
5189  if (!(ctxt->d & Mov))
5191  }
5192 
5193  if (unlikely(is_guest_mode) && ctxt->intercept) {
5194  rc = emulator_check_intercept(ctxt, ctxt->intercept,
5196  if (rc != X86EMUL_CONTINUE)
5197  goto done;
5198  }
5199 
5200  /* Instruction can only be executed in protected mode */
5201  if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
5202  rc = emulate_ud(ctxt);
5203  goto done;
5204  }
5205 
5206  /* Privileged instruction can be executed only in CPL=0 */
5207  if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
5208  if (ctxt->d & PrivUD)
5209  rc = emulate_ud(ctxt);
5210  else
5211  rc = emulate_gp(ctxt, 0);
5212  goto done;
5213  }
5214 
5215  /* Do instruction specific permission checks */
5216  if (ctxt->d & CheckPerm) {
5217  rc = ctxt->check_perm(ctxt);
5218  if (rc != X86EMUL_CONTINUE)
5219  goto done;
5220  }
5221 
5222  if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5223  rc = emulator_check_intercept(ctxt, ctxt->intercept,
5225  if (rc != X86EMUL_CONTINUE)
5226  goto done;
5227  }
5228 
5229  if (ctxt->rep_prefix && (ctxt->d & String)) {
5230  /* All REP prefixes have the same first termination condition */
5231  if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
5232  string_registers_quirk(ctxt);
5233  ctxt->eip = ctxt->_eip;
5234  ctxt->eflags &= ~X86_EFLAGS_RF;
5235  goto done;
5236  }
5237  }
5238  }
5239 
5240  if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
5241  rc = segmented_read(ctxt, ctxt->src.addr.mem,
5242  ctxt->src.valptr, ctxt->src.bytes);
5243  if (rc != X86EMUL_CONTINUE)
5244  goto done;
5245  ctxt->src.orig_val64 = ctxt->src.val64;
5246  }
5247 
5248  if (ctxt->src2.type == OP_MEM) {
5249  rc = segmented_read(ctxt, ctxt->src2.addr.mem,
5250  &ctxt->src2.val, ctxt->src2.bytes);
5251  if (rc != X86EMUL_CONTINUE)
5252  goto done;
5253  }
5254 
5255  if ((ctxt->d & DstMask) == ImplicitOps)
5256  goto special_insn;
5257 
5258 
5259  if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
5260  /* optimisation - avoid slow emulated read if Mov */
5261  rc = segmented_read(ctxt, ctxt->dst.addr.mem,
5262  &ctxt->dst.val, ctxt->dst.bytes);
5263  if (rc != X86EMUL_CONTINUE) {
5264  if (!(ctxt->d & NoWrite) &&
5265  rc == X86EMUL_PROPAGATE_FAULT &&
5266  ctxt->exception.vector == PF_VECTOR)
5267  ctxt->exception.error_code |= PFERR_WRITE_MASK;
5268  goto done;
5269  }
5270  }
5271  /* Copy full 64-bit value for CMPXCHG8B. */
5272  ctxt->dst.orig_val64 = ctxt->dst.val64;
5273 
5274 special_insn:
5275 
5276  if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
5277  rc = emulator_check_intercept(ctxt, ctxt->intercept,
5279  if (rc != X86EMUL_CONTINUE)
5280  goto done;
5281  }
5282 
5283  if (ctxt->rep_prefix && (ctxt->d & String))
5284  ctxt->eflags |= X86_EFLAGS_RF;
5285  else
5286  ctxt->eflags &= ~X86_EFLAGS_RF;
5287 
5288  if (ctxt->execute) {
5289  if (ctxt->d & Fastop)
5290  rc = fastop(ctxt, ctxt->fop);
5291  else
5292  rc = ctxt->execute(ctxt);
5293  if (rc != X86EMUL_CONTINUE)
5294  goto done;
5295  goto writeback;
5296  }
5297 
5298  if (ctxt->opcode_len == 2)
5299  goto twobyte_insn;
5300  else if (ctxt->opcode_len == 3)
5301  goto threebyte_insn;
5302 
5303  switch (ctxt->b) {
5304  case 0x70 ... 0x7f: /* jcc (short) */
5305  if (test_cc(ctxt->b, ctxt->eflags))
5306  rc = jmp_rel(ctxt, ctxt->src.val);
5307  break;
5308  case 0x8d: /* lea r16/r32, m */
5309  ctxt->dst.val = ctxt->src.addr.mem.ea;
5310  break;
5311  case 0x90 ... 0x97: /* nop / xchg reg, rax */
5312  if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
5313  ctxt->dst.type = OP_NONE;
5314  else
5315  rc = em_xchg(ctxt);
5316  break;
5317  case 0x98: /* cbw/cwde/cdqe */
5318  switch (ctxt->op_bytes) {
5319  case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
5320  case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
5321  case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
5322  }
5323  break;
5324  case 0xcc: /* int3 */
5325  rc = emulate_int(ctxt, 3);
5326  break;
5327  case 0xcd: /* int n */
5328  rc = emulate_int(ctxt, ctxt->src.val);
5329  break;
5330  case 0xce: /* into */
5331  if (ctxt->eflags & X86_EFLAGS_OF)
5332  rc = emulate_int(ctxt, 4);
5333  break;
5334  case 0xe9: /* jmp rel */
5335  case 0xeb: /* jmp rel short */
5336  rc = jmp_rel(ctxt, ctxt->src.val);
5337  ctxt->dst.type = OP_NONE; /* Disable writeback. */
5338  break;
5339  case 0xf4: /* hlt */
5340  ctxt->ops->halt(ctxt);
5341  break;
5342  case 0xf5: /* cmc */
5343  /* complement carry flag from eflags reg */
5344  ctxt->eflags ^= X86_EFLAGS_CF;
5345  break;
5346  case 0xf8: /* clc */
5347  ctxt->eflags &= ~X86_EFLAGS_CF;
5348  break;
5349  case 0xf9: /* stc */
5350  ctxt->eflags |= X86_EFLAGS_CF;
5351  break;
5352  case 0xfc: /* cld */
5353  ctxt->eflags &= ~X86_EFLAGS_DF;
5354  break;
5355  case 0xfd: /* std */
5356  ctxt->eflags |= X86_EFLAGS_DF;
5357  break;
5358  default:
5359  goto cannot_emulate;
5360  }
5361 
5362  if (rc != X86EMUL_CONTINUE)
5363  goto done;
5364 
5365 writeback:
5366  if (ctxt->d & SrcWrite) {
5367  BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
5368  rc = writeback(ctxt, &ctxt->src);
5369  if (rc != X86EMUL_CONTINUE)
5370  goto done;
5371  }
5372  if (!(ctxt->d & NoWrite)) {
5373  rc = writeback(ctxt, &ctxt->dst);
5374  if (rc != X86EMUL_CONTINUE)
5375  goto done;
5376  }
5377 
5378  /*
5379  * restore dst type in case the decoding will be reused
5380  * (happens for string instruction )
5381  */
5382  ctxt->dst.type = saved_dst_type;
5383 
5384  if ((ctxt->d & SrcMask) == SrcSI)
5385  string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
5386 
5387  if ((ctxt->d & DstMask) == DstDI)
5388  string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
5389 
5390  if (ctxt->rep_prefix && (ctxt->d & String)) {
5391  unsigned int count;
5392  struct read_cache *r = &ctxt->io_read;
5393  if ((ctxt->d & SrcMask) == SrcSI)
5394  count = ctxt->src.count;
5395  else
5396  count = ctxt->dst.count;
5397  register_address_increment(ctxt, VCPU_REGS_RCX, -count);
5398 
5399  if (!string_insn_completed(ctxt)) {
5400  /*
5401  * Re-enter guest when pio read ahead buffer is empty
5402  * or, if it is not used, after each 1024 iteration.
5403  */
5404  if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
5405  (r->end == 0 || r->end != r->pos)) {
5406  /*
5407  * Reset read cache. Usually happens before
5408  * decode, but since instruction is restarted
5409  * we have to do it here.
5410  */
5411  ctxt->mem_read.end = 0;
5412  writeback_registers(ctxt);
5413  return EMULATION_RESTART;
5414  }
5415  goto done; /* skip rip writeback */
5416  }
5417  ctxt->eflags &= ~X86_EFLAGS_RF;
5418  }
5419 
5420  ctxt->eip = ctxt->_eip;
5421  if (ctxt->mode != X86EMUL_MODE_PROT64)
5422  ctxt->eip = (u32)ctxt->_eip;
5423 
5424 done:
5425  if (rc == X86EMUL_PROPAGATE_FAULT) {
5426  if (KVM_EMULATOR_BUG_ON(ctxt->exception.vector > 0x1f, ctxt))
5427  return EMULATION_FAILED;
5428  ctxt->have_exception = true;
5429  }
5430  if (rc == X86EMUL_INTERCEPTED)
5431  return EMULATION_INTERCEPTED;
5432 
5433  if (rc == X86EMUL_CONTINUE)
5434  writeback_registers(ctxt);
5435 
5437 
5438 twobyte_insn:
5439  switch (ctxt->b) {
5440  case 0x09: /* wbinvd */
5441  (ctxt->ops->wbinvd)(ctxt);
5442  break;
5443  case 0x08: /* invd */
5444  case 0x0d: /* GrpP (prefetch) */
5445  case 0x18: /* Grp16 (prefetch/nop) */
5446  case 0x1f: /* nop */
5447  break;
5448  case 0x20: /* mov cr, reg */
5449  ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5450  break;
5451  case 0x21: /* mov from dr to reg */
5452  ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5453  break;
5454  case 0x40 ... 0x4f: /* cmov */
5455  if (test_cc(ctxt->b, ctxt->eflags))
5456  ctxt->dst.val = ctxt->src.val;
5457  else if (ctxt->op_bytes != 4)
5458  ctxt->dst.type = OP_NONE; /* no writeback */
5459  break;
5460  case 0x80 ... 0x8f: /* jnz rel, etc*/
5461  if (test_cc(ctxt->b, ctxt->eflags))
5462  rc = jmp_rel(ctxt, ctxt->src.val);
5463  break;
5464  case 0x90 ... 0x9f: /* setcc r/m8 */
5465  ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5466  break;
5467  case 0xb6 ... 0xb7: /* movzx */
5468  ctxt->dst.bytes = ctxt->op_bytes;
5469  ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5470  : (u16) ctxt->src.val;
5471  break;
5472  case 0xbe ... 0xbf: /* movsx */
5473  ctxt->dst.bytes = ctxt->op_bytes;
5474  ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5475  (s16) ctxt->src.val;
5476  break;
5477  default:
5478  goto cannot_emulate;
5479  }
5480 
5481 threebyte_insn:
5482 
5483  if (rc != X86EMUL_CONTINUE)
5484  goto done;
5485 
5486  goto writeback;
5487 
5488 cannot_emulate:
5489  return EMULATION_FAILED;
5490 }
#define Lock
Definition: emulate.c:143
static void fetch_possible_mmx_operand(struct operand *op)
Definition: emulate.c:5101
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
Definition: emulate.c:1065
#define No64
Definition: emulate.c:145
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
Definition: emulate.c:5107
#define PrivUD
Definition: emulate.c:175
static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:2641
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
Definition: emulate.c:1785
#define SrcMemFAddr
Definition: emulate.c:112
#define ImplicitOps
Definition: emulate.c:86
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:608
#define Prot
Definition: emulate.c:138
#define SrcSI
Definition: emulate.c:109
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg, struct operand *op)
Definition: emulate.c:3041
#define NoAccess
Definition: emulate.c:140
static int em_xchg(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:3199
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
Definition: emulate.c:2080
static void register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
Definition: emulate.c:554
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:623
#define SrcWrite
Definition: emulate.c:171
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5087
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
Definition: emulate.c:5067
#define Undefined
Definition: emulate.c:142
#define Fastop
Definition: emulate.c:169
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, enum x86_intercept intercept, enum x86_intercept_stage stage)
Definition: emulate.c:466
#define Priv
Definition: emulate.c:144
#define Mov
Definition: emulate.c:136
static int segmented_read(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, void *data, unsigned size)
Definition: emulate.c:1381
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
Definition: emulate.c:598
#define SrcMask
Definition: emulate.c:119
#define NoWrite
Definition: emulate.c:170
#define DstDI
Definition: emulate.c:90
#define DstMask
Definition: emulate.c:96
static int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
Definition: emulate.c:842
static bool is_guest_mode(struct kvm_vcpu *vcpu)
#define EMULATION_INTERCEPTED
Definition: kvm_emulate.h:508
#define EMULATION_RESTART
Definition: kvm_emulate.h:507
#define X86EMUL_INTERCEPTED
Definition: kvm_emulate.h:89
static ulong * reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
Definition: kvm_emulate.h:544
unsigned int bytes
Definition: kvm_emulate.h:241
unsigned long * reg
Definition: kvm_emulate.h:248
char valptr[sizeof(sse128_t)]
Definition: kvm_emulate.h:259
unsigned int count
Definition: kvm_emulate.h:242
unsigned long val
Definition: kvm_emulate.h:257
u64 val64
Definition: kvm_emulate.h:258
u64 orig_val64
Definition: kvm_emulate.h:245
unsigned long eflags
Definition: kvm_emulate.h:312
struct x86_exception exception
Definition: kvm_emulate.h:324
void(* halt)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:213
ulong(* get_cr)(struct x86_emulate_ctxt *ctxt, int cr)
Definition: kvm_emulate.h:203
void(* get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest)
Definition: kvm_emulate.h:206
int(* cpl)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:205
bool(* is_guest_mode)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:229
void(* wbinvd)(struct x86_emulate_ctxt *ctxt)
Definition: kvm_emulate.h:214
Here is the call graph for this function:
Here is the caller graph for this function:

◆ x86_page_table_writing_insn()

bool x86_page_table_writing_insn ( struct x86_emulate_ctxt * ctxt)

Definition at line 5062 of file emulate.c.

/*
 * True iff the decoded instruction carries the PageTable flag in ctxt->d,
 * i.e. the decoder marked it as an instruction that may write guest page
 * tables (PageTable is defined in emulate.c:146).
 */
5063 {
5064  return ctxt->d & PageTable;
5065 }
#define PageTable
Definition: emulate.c:146
Here is the caller graph for this function: