void dump_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmentry_ctl, vmexit_ctl;
	u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
	u64 tertiary_exec_control;
	int efer_slot;

	/* Dumping the VMCS is opt-in; bail out unless the module param is set. */
	if (!dump_invalid_vmcs) {
		pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
		return;
	}
	vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
	vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
	cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);

	/* Secondary/tertiary controls exist only if the CPU supports them. */
	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
	else
		secondary_exec_control = 0;

	if (cpu_has_tertiary_exec_ctrls())
		tertiary_exec_control = vmcs_read64(TERTIARY_VM_EXEC_CONTROL);
	else
		tertiary_exec_control = 0;
	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
	pr_err("*** Guest State ***\n");
	pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
	       vmcs_readl(CR0_GUEST_HOST_MASK));
	pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
	       vmcs_readl(GUEST_CR4), vmcs_readl(CR4_READ_SHADOW),
	       vmcs_readl(CR4_GUEST_HOST_MASK));
	pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
	if (cpu_has_vmx_ept()) {
		pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
		pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
		       vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
	}
	pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
	       vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
	pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
	       vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(GUEST_SYSENTER_ESP),
	       vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
	/* EFER may come from the VMCS, the MSR autoload list, or be derived. */
	efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
		pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
	else if (efer_slot >= 0)
		pr_err("EFER= 0x%016llx (autoload)\n",
		       vmx->msr_autoload.guest.val[efer_slot].value);
	else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
		pr_err("EFER= 0x%016llx (effective)\n",
		       vcpu->arch.efer | (EFER_LMA | EFER_LME));
	else
		pr_err("EFER= 0x%016llx (effective)\n",
		       vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
	if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
		pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
	pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
	       vmcs_read64(GUEST_IA32_DEBUGCTL),
	       vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
	if (cpu_has_load_perf_global_ctrl() &&
	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
	pr_err("Interruptibility = %08x ActivityState = %08x\n",
	       vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
	       vmcs_read32(GUEST_ACTIVITY_STATE));
	if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
		pr_err("InterruptStatus = %04x\n",
		       vmcs_read16(GUEST_INTR_STATUS));
	pr_err("*** Host State ***\n");
	pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
	       vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
	pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
	       vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
	       vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
	       vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
	       vmcs_read16(HOST_TR_SELECTOR));
	pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
	       vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
	       vmcs_readl(HOST_TR_BASE));
	pr_err("GDTBase=%016lx IDTBase=%016lx\n",
	       vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
	pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
	       vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
	       vmcs_readl(HOST_CR4));
	pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
	       vmcs_readl(HOST_IA32_SYSENTER_ESP),
	       vmcs_read32(HOST_IA32_SYSENTER_CS),
	       vmcs_readl(HOST_IA32_SYSENTER_EIP));
	if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
		pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
	if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
		pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
	if (cpu_has_load_perf_global_ctrl() &&
	    vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		pr_err("PerfGlobCtl = 0x%016llx\n",
		       vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
	pr_err("*** Control State ***\n");
	pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
	       cpu_based_exec_ctrl, secondary_exec_control, tertiary_exec_control);
	pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
	       pin_based_exec_ctrl, vmentry_ctl, vmexit_ctl);
	pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
	       vmcs_read32(EXCEPTION_BITMAP),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
	       vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
	pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
	       vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
	       vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
	pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
	       vmcs_read32(VM_EXIT_INTR_INFO),
	       vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
	       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
	pr_err("        reason=%08x qualification=%016lx\n",
	       vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
	pr_err("IDTVectoring: info=%08x errcode=%08x\n",
	       vmcs_read32(IDT_VECTORING_INFO_FIELD),
	       vmcs_read32(IDT_VECTORING_ERROR_CODE));
	pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
	if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
		pr_err("TSC Multiplier = 0x%016llx\n",
		       vmcs_read64(TSC_MULTIPLIER));
	if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW) {
		if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
			u16 status = vmcs_read16(GUEST_INTR_STATUS);

			pr_err("SVI|RVI = %02x|%02x ", status >> 8, status & 0xff);
		}
		pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
		if (secondary_exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
			pr_err("APIC-access addr = 0x%016llx ",
			       vmcs_read64(APIC_ACCESS_ADDR));
		pr_cont("virt-APIC addr = 0x%016llx\n",
			vmcs_read64(VIRTUAL_APIC_PAGE_ADDR));
	}
	if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
		pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
	if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
		pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
	if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
		pr_err("PLE Gap=%08x Window=%08x\n",
		       vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
		pr_err("Virtual processor ID = 0x%04x\n",
		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
}