#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/objtool.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>
#include <linux/cc_platform.h>
#include <linux/smp.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
#include <asm/reboot.h>
#include <asm/fpu/api.h>

#include <trace/events/ipi.h>
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define X2APIC_MSR(x)	(APIC_BASE_MSR + (x >> 4))
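/*
 * x2APIC registers are exposed as MSRs at APIC_BASE_MSR (0x800) plus the
 * MMIO register offset divided by 16, e.g. the TPR at MMIO offset 0x80
 * becomes MSR 0x808.  X2APIC_MSR() is used below to list those MSRs in
 * the direct-access table so their intercepts can be toggled as a group.
 */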
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
	{ .index = MSR_IA32_SYSENTER_EIP,		.always = false },
	{ .index = MSR_IA32_SYSENTER_ESP,		.always = false },
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_FLUSH_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_IA32_XSS,			.always = false },
	{ .index = MSR_EFER,				.always = false },
	{ .index = MSR_IA32_CR_PAT,			.always = false },
	{ .index = MSR_AMD64_SEV_ES_GHCB,		.always = true  },
	{ .index = MSR_TSC_AUX,				.always = false },
	{ .index = X2APIC_MSR(APIC_ID),			.always = false },
	{ .index = X2APIC_MSR(APIC_LVR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TASKPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_ARBPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_PROCPRI),		.always = false },
	{ .index = X2APIC_MSR(APIC_EOI),		.always = false },
	{ .index = X2APIC_MSR(APIC_RRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LDR),		.always = false },
	{ .index = X2APIC_MSR(APIC_DFR),		.always = false },
	{ .index = X2APIC_MSR(APIC_SPIV),		.always = false },
	{ .index = X2APIC_MSR(APIC_ISR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_IRR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ESR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR),		.always = false },
	{ .index = X2APIC_MSR(APIC_ICR2),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTTHMR),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTPC),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT0),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVT1),		.always = false },
	{ .index = X2APIC_MSR(APIC_LVTERR),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMICT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TMCCT),		.always = false },
	{ .index = X2APIC_MSR(APIC_TDCR),		.always = false },
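/*
 * Entries with .always == true have their read/write intercepts cleared
 * unconditionally when the per-vCPU MSR permission map is initialized;
 * the remaining entries are only opened up on demand (e.g. SPEC_CTRL
 * after the guest first uses it, or the x2APIC MSRs when x2AVIC is
 * active), so the table mostly records which MSRs *may* be passed
 * through rather than which ones currently are.
 */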
#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
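/*
 * The MSR permission bitmap is split into 2K-byte ranges and every MSR
 * consumes two bits (one read-intercept bit, one write-intercept bit),
 * so each range covers MSRS_IN_RANGE = 2048 * 8 / 2 = 8192 MSRs.
 */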
	u64 old_efer = vcpu->arch.efer;
	vcpu->arch.efer = efer;

	if (!(efer & EFER_LMA))

	if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
		if (!(efer & EFER_SVME)) {

			vcpu->arch.efer = old_efer;

	svm->vmcb->save.efer = efer | EFER_SVME;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;

		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
					    bool commit_side_effects)
	unsigned long old_rflags;

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));

	if (unlikely(!commit_side_effects))
		old_rflags = svm->vmcb->save.rflags;

	if (unlikely(!commit_side_effects))
		svm->vmcb->save.rflags = old_rflags;

	if (likely(commit_side_effects))

	if (static_cpu_has(X86_FEATURE_NRIPS))
		svm->vmcb->control.next_rip = rip;

	struct kvm_queued_exception *ex = &vcpu->arch.exception;

	svm->vmcb->control.event_inj = ex->vector
		| SVM_EVTINJ_VALID
		| (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = ex->error_code;
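/*
 * EVENTINJ encoding: the low byte is the vector, SVM_EVTINJ_TYPE_* selects
 * exception vs. external/soft interrupt vs. NMI, SVM_EVTINJ_VALID arms the
 * injection on the next VMRUN, and SVM_EVTINJ_VALID_ERR tells the CPU to
 * push event_inj_err as the error code.
 */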
	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))

	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;

	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;

	int cpu = smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON) {
		pr_err("CPU %d isn't AMD or Hygon\n", cpu);

	if (!cpu_has(c, X86_FEATURE_SVM)) {
		pr_err("SVM not supported by CPU %d\n", cpu);

	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		pr_info("KVM is unsupported when running as an SEV guest\n");
	if (multiplier == __this_cpu_read(current_tsc_ratio))

	wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
	__this_cpu_write(current_tsc_ratio, multiplier);

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {

		wrmsrl(MSR_EFER, efer & ~EFER_SVME);

	kvm_rebooting = true;

	amd_pmu_disable_virt();

	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)

	sd = per_cpu_ptr(&svm_data, me);

	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {

	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);

		status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,

	amd_pmu_enable_virt();

	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
		struct sev_es_save_area *hostsa;
		u32 __maybe_unused msr_hi;

		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

		rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);

	sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;
	bit_write = 2 * (msr & 0x0f) + 1;

	return test_bit(bit_write, &tmp);

					  u32 msr, int read, int write)
	u8 bit_read, bit_write;

	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);

	msrpm = page_address(pages);
	memset(msrpm, 0xff, PAGE_SIZE * (1 << order));

	if ((index < APIC_BASE_MSR) ||
	    (index > APIC_BASE_MSR + 0xff))

					     !intercept, !intercept);

	__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));

		if (msrpm_offsets[i] == offset)

	msrpm_offsets[i] = offset;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
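/*
 * Layout of the MSR permission map: every tracked MSR owns a pair of
 * adjacent bits (even bit = read intercept, odd bit = write intercept),
 * 16 MSRs per 32-bit word.  A set bit means "intercept", so clear_bit()
 * above is what grants the guest direct access.  msrpm_offsets[] caches
 * the word offsets of the direct-access MSRs so the nested code only has
 * to merge those words of L1's bitmap instead of scanning all of it.
 */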
	to_vmcb->save.dbgctl		= from_vmcb->save.dbgctl;
	to_vmcb->save.br_from		= from_vmcb->save.br_from;
	to_vmcb->save.br_to		= from_vmcb->save.br_to;
	to_vmcb->save.last_excp_from	= from_vmcb->save.last_excp_from;
	to_vmcb->save.last_excp_to	= from_vmcb->save.last_excp_to;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;

	return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
								   svm->vmcb01.ptr;

	bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
	bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||

	if (enable_lbrv == current_enable_lbrv)

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {

		svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;

		svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (control->pause_filter_count != old) {

		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);

	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	if (control->pause_filter_count != old) {

		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
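/*
 * PLE (pause-loop exiting) tuning: grow_ple_window() bumps the PAUSE
 * filter count when a vCPU keeps triggering PAUSE exits (it is probably
 * spinning on a lock held by a vCPU that is still running, so exiting is
 * wasted work), while shrink_ple_window() backs the count off when the
 * vCPU is scheduled back in, so real contention is still detected
 * promptly.
 */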
	for_each_possible_cpu(cpu)

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
		     get_order(IOPM_SIZE));
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK;
	seg->limit = 0xffff;

	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;

	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;

		svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;

	struct vmcb_control_area *control = &vmcb->control;
	struct vmcb_save_area *save = &vmcb->save;

	control->int_ctl = V_INTR_MASKING_MASK;

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
			  SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.base = 0;
	save->gdtr.limit = 0xffff;
	save->idtr.base = 0;
	save->idtr.limit = 0xffff;

		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;

	save->g_pat = vcpu->arch.pat;

	if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))

		svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;

		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;

	vcpu->arch.microcode_version = 0x01000065;
	struct page *vmcb01_page;
	struct page *vmsa_page = NULL;

	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);

	vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

		vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

			goto error_free_vmcb_page;

		fpstate_set_confidential(&vcpu->arch.guest_fpu);

		goto error_free_vmsa_page;

		goto error_free_vmsa_page;

	svm->vmcb01.ptr = page_address(vmcb01_page);
	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);

error_free_vmsa_page:
	if (vmsa_page)
		__free_page(vmsa_page);
error_free_vmcb_page:
	__free_page(vmcb01_page);

	for_each_online_cpu(i)
		cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);

	__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);

		struct sev_es_save_area *hostsa;
		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);

	if (likely(tsc_aux_uret_slot >= 0) &&
	    (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))

		if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
			indirect_branch_prediction_barrier();

	++vcpu->stat.host_state_reload;

	unsigned long rflags = svm->vmcb->save.rflags;

		rflags &= ~X86_EFLAGS_TF;

		rflags &= ~X86_EFLAGS_RF;

		rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

		? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK

	case VCPU_EXREG_PDPTR:

		KVM_BUG_ON(1, vcpu->kvm);
	struct vmcb_control_area *control;

	control->int_ctl &= ~V_INTR_PRIO_MASK;
	control->int_ctl |= V_IRQ_MASK |
		((/*V_INTR_PRIO*/ 0xf) << V_INTR_PRIO_SHIFT);

	svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

		svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;

		WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=

			V_IRQ_INJECTION_BITS_MASK;
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save01->fs;
	case VCPU_SREG_GS: return &save01->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save01->tr;
	case VCPU_SREG_LDTR: return &save01->ldtr;

	struct vmcb_seg *s = svm_seg(vcpu, seg);
			    struct kvm_segment *var, int seg)
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

	var->g = s->limit > 0xfffff;

	var->unusable = !var->present;

	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	struct kvm_segment cs;
	dt->size = svm->vmcb->save.idtr.limit;
	dt->address = svm->vmcb->save.idtr.base;

	svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;

	dt->size = svm->vmcb->save.gdtr.limit;
	dt->address = svm->vmcb->save.gdtr.base;

	svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;

	svm->vmcb->save.cr3 = cr3;
#ifdef CONFIG_X86_64
	if (vcpu->arch.efer & EFER_LME) {

			vcpu->arch.efer |= EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer |= EFER_LMA | EFER_LME;

			vcpu->arch.efer &= ~EFER_LMA;
			if (!vcpu->arch.guest_state_protected)
				svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);

	vcpu->arch.cr0 = cr0;

		hcr0 |= X86_CR0_PG | X86_CR0_WP;

		hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);

	svm->vmcb->save.cr0 = hcr0;

	unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
	unsigned long old_cr4 = vcpu->arch.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))

	vcpu->arch.cr4 = cr4;

		cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);

	cr4 |= host_cr4_mce;

	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
			    struct kvm_segment *var, int seg)
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

	if (seg == VCPU_SREG_SS)

		svm->vmcb->save.cpl = (var->dpl & 3);

	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	struct vmcb *vmcb = svm->vmcb;

	if (svm->vcpu.arch.guest_state_protected)

	if (unlikely(value != vmcb->save.dr6)) {
		vmcb->save.dr6 = value;

	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);

	vcpu->arch.dr6 = svm->vmcb->save.dr6;
	vcpu->arch.dr7 = svm->vmcb->save.dr7;
	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;

	if (vcpu->arch.guest_state_protected)

	svm->vmcb->save.dr7 = value;
	u64 fault_address = svm->vmcb->control.exit_info_2;
	u64 error_code = svm->vmcb->control.exit_info_1;

			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);

	u64 fault_address = svm->vmcb->control.exit_info_2;
	u64 error_code = svm->vmcb->control.exit_info_1;

	trace_kvm_page_fault(vcpu, fault_address, error_code);

			static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
			svm->vmcb->control.insn_bytes : NULL,
			svm->vmcb->control.insn_len);
	struct kvm_run *kvm_run = vcpu->run;

	if (!(vcpu->guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&

		u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;

		kvm_make_request(KVM_REQ_EVENT, vcpu);

	if (vcpu->guest_debug &
	    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
		kvm_run->exit_reason = KVM_EXIT_DEBUG;
		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
		kvm_run->debug.arch.pc =
			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
		kvm_run->debug.arch.exception = DB_VECTOR;

	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	if (!erratum_383_found)

	value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);

	value &= ~(1ULL << 62);

	if (value != 0xb600000000010015ULL)

	for (i = 0; i < 6; ++i)
		native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

	value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);

	value &= ~(1ULL << 2);
	low  = lower_32_bits(value);
	high = upper_32_bits(value);

	native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
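/*
 * Erratum 383 leaves a characteristic machine-check signature behind:
 * MC0_STATUS reads as 0xb600000000010015 once bit 62 (the overflow
 * flag) is masked off.  When that signature is seen, the bank status
 * registers and MCG_STATUS are cleared by hand so the spurious #MC
 * does not take the host down, and the guest is given a triple fault
 * instead (see svm_handle_mce() below).
 */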
	pr_err("Guest triggered AMD Erratum 383\n");

	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);

	struct kvm_run *kvm_run = vcpu->run;

	clear_page(svm->vmcb);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	u32 io_info = svm->vmcb->control.exit_info_1;
	int size, in, string;

	++vcpu->stat.io_exits;
	string = (io_info & SVM_IOIO_STR_MASK) != 0;
	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
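/*
 * For IOIO exits the hardware packs the decode into exit_info_1: the
 * type bit distinguishes IN from OUT, the STR bit flags a string
 * instruction (INS/OUTS), the size bits live at SVM_IOIO_SIZE_SHIFT,
 * and the port number occupies the upper 16 bits, which is why it can
 * be recovered with a plain "io_info >> 16".
 */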
	++vcpu->stat.irq_exits;

	struct vmcb *vmcb12;
	struct kvm_host_map map;

		kvm_inject_gp(vcpu, 0);

	switch (ctxt->modrm) {

	const int guest_mode_exit_codes[] = {

	int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
	u32 error_code = svm->vmcb->control.exit_info_1;

				   EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);

		if (svm->vmcb->save.rax & ~PAGE_MASK)

		if (svm->vcpu.arch.smi_pending ||
		    svm->vcpu.arch.nmi_pending ||

			kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	gva_t gva = kvm_rax_read(vcpu);
	int int_type = svm->vmcb->control.exit_int_info &
		SVM_EXITINTINFO_TYPE_MASK;
	int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
		svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
	bool has_error_code = false;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;

	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		reason = TASK_SWITCH_IRET;
	else if (svm->vmcb->control.exit_info_2 &
		 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		reason = TASK_SWITCH_JMP;

		reason = TASK_SWITCH_GATE;

		reason = TASK_SWITCH_CALL;

	if (reason == TASK_SWITCH_GATE) {
		case SVM_EXITINTINFO_TYPE_NMI:
			vcpu->arch.nmi_injected = false;

		case SVM_EXITINTINFO_TYPE_EXEPT:
			if (svm->vmcb->control.exit_info_2 &
			    (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
				has_error_code = true;
					(u32)svm->vmcb->control.exit_info_2;

		case SVM_EXITINTINFO_TYPE_INTR:
		case SVM_EXITINTINFO_TYPE_SOFT:

	if (reason != TASK_SWITCH_GATE ||
	    int_type == SVM_EXITINTINFO_TYPE_SOFT ||
	    (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
	     (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {

	if (int_type != SVM_EXITINTINFO_TYPE_SOFT)

			       has_error_code, error_code);

	++vcpu->stat.nmi_window_exits;

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))

	unsigned long cr0 = vcpu->arch.cr0;

		cr0 &= ~SVM_CR0_SELECTIVE_MASK;
		val &= ~SVM_CR0_SELECTIVE_MASK;

			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;

#define CR_VALID (1ULL << 63)

	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))

	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
		cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;

		cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

			WARN(1, "unhandled write to CR%d", cr);

			val = vcpu->arch.cr2;

			WARN(1, "unhandled read from CR%d", cr);

	unsigned long old_value, new_value;

	new_value = (unsigned long)svm->vmcb->control.exit_info_1;

	cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;

		WARN(1, "unhandled CR%d write trap", cr);

	if (vcpu->guest_debug == 0) {

		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;

	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))

	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

	vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
	struct msr_data msr_info;

	msr_info.host_initiated = false;
	msr_info.index = MSR_EFER;
	msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;

	switch (msr->index) {
	case MSR_AMD64_DE_CFG:
		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)

	switch (msr_info->index) {
	case MSR_AMD64_TSC_RATIO:
		if (!msr_info->host_initiated &&

		msr_info->data = svm->vmcb01.ptr->save.star;

#ifdef CONFIG_X86_64
		msr_info->data = svm->vmcb01.ptr->save.lstar;

		msr_info->data = svm->vmcb01.ptr->save.cstar;

	case MSR_KERNEL_GS_BASE:
		msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;

	case MSR_SYSCALL_MASK:
		msr_info->data = svm->vmcb01.ptr->save.sfmask;

	case MSR_IA32_SYSENTER_CS:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;

	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;

	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;

		msr_info->data = svm->tsc_aux;

	case MSR_IA32_DEBUGCTLMSR:

	case MSR_IA32_LASTBRANCHFROMIP:

	case MSR_IA32_LASTBRANCHTOIP:

	case MSR_IA32_LASTINTFROMIP:

	case MSR_IA32_LASTINTTOIP:

	case MSR_VM_HSAVE_PA:

	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			msr_info->data = svm->vmcb->save.spec_ctrl;

	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr_info->host_initiated &&

	case MSR_F15H_IC_CFG: {

		if (family < 0 || model < 0)

		if (family == 0x15 &&
		    (model >= 0x2 && model < 0x20))
			msr_info->data = 0x1E;

	case MSR_AMD64_DE_CFG:

			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);

					  SVM_EVTINJ_TYPE_EXEPT |
	int svm_dis, chg_mask;

	if (data & ~SVM_VM_CR_VALID_MASK)

	chg_mask = SVM_VM_CR_VALID_MASK;

		chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

	if (svm_dis && (vcpu->arch.efer & EFER_SVME))

	u32 ecx = msr->index;
	u64 data = msr->data;

	case MSR_AMD64_TSC_RATIO:

		if (!msr->host_initiated)

		if (data & SVM_TSC_RATIO_RSVD)

	case MSR_IA32_CR_PAT:

	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			svm->vmcb->save.spec_ctrl = data;

	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&

		if (data & ~SPEC_CTRL_SSBD)

#ifdef CONFIG_X86_64

	case MSR_KERNEL_GS_BASE:
		svm->vmcb01.ptr->save.kernel_gs_base = data;

	case MSR_SYSCALL_MASK:

	case MSR_IA32_SYSENTER_CS:
		svm->vmcb01.ptr->save.sysenter_cs = data;

	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;

	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;

	case MSR_IA32_DEBUGCTLMSR:

	case MSR_VM_HSAVE_PA:

	case MSR_AMD64_DE_CFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;

		if (data & ~msr_entry.data)

		if (!msr->host_initiated && (data ^ msr_entry.data))

	if (to_svm(vcpu)->vmcb->control.exit_info_1)

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

	++vcpu->stat.irq_window_exits;

	type = svm->vmcb->control.exit_info_2;
	gva = svm->vmcb->control.exit_info_1;

	[SVM_EXIT_CR0_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR4_WRITE_TRAP]		= cr_trap,
	[SVM_EXIT_CR8_WRITE_TRAP]		= cr_trap,
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;
	struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;

		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");

	pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",

	pr_err("VMCB Control Area:\n");
	pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
	pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%08x %08x\n", "intercepts:",
	       control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
	pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
	pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
	pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
	pr_err("%-20s%d\n", "asid:", control->asid);
	pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
	pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
	pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
	pr_err("%-20s%08x\n", "int_state:", control->int_state);
	pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
	pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
	pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
	pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
	pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
	pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
	pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
	pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
	pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
	pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
	pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
	pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
	pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
	pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
	pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
	pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
	pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
	pr_err("VMCB State Save Area:\n");
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "es:",
	       save->es.selector, save->es.attrib,
	       save->es.limit, save->es.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "cs:",
	       save->cs.selector, save->cs.attrib,
	       save->cs.limit, save->cs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ss:",
	       save->ss.selector, save->ss.attrib,
	       save->ss.limit, save->ss.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ds:",
	       save->ds.selector, save->ds.attrib,
	       save->ds.limit, save->ds.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "fs:",
	       save01->fs.selector, save01->fs.attrib,
	       save01->fs.limit, save01->fs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gs:",
	       save01->gs.selector, save01->gs.attrib,
	       save01->gs.limit, save01->gs.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "gdtr:",
	       save->gdtr.selector, save->gdtr.attrib,
	       save->gdtr.limit, save->gdtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "ldtr:",
	       save01->ldtr.selector, save01->ldtr.attrib,
	       save01->ldtr.limit, save01->ldtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "idtr:",
	       save->idtr.selector, save->idtr.attrib,
	       save->idtr.limit, save->idtr.base);
	pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
	       "tr:",
	       save01->tr.selector, save01->tr.attrib,
	       save01->tr.limit, save01->tr.base);
	pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
	       save->vmpl, save->cpl, save->efer);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr0:", save->cr0, "cr2:", save->cr2);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cr3:", save->cr3, "cr4:", save->cr4);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "dr6:", save->dr6, "dr7:", save->dr7);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rip:", save->rip, "rflags:", save->rflags);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "rsp:", save->rsp, "rax:", save->rax);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "star:", save01->star, "lstar:", save01->lstar);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "cstar:", save01->cstar, "sfmask:", save01->sfmask);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "kernel_gs_base:", save01->kernel_gs_base,
	       "sysenter_cs:", save01->sysenter_cs);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "sysenter_esp:", save01->sysenter_esp,
	       "sysenter_eip:", save01->sysenter_eip);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "br_from:", save->br_from, "br_to:", save->br_to);
	pr_err("%-15s %016llx %-13s %016llx\n",
	       "excp_from:", save->last_excp_from,
	       "excp_to:", save->last_excp_to);
	vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);

	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

#ifdef CONFIG_RETPOLINE
	if (exit_code == SVM_EXIT_MSR)

	else if (exit_code == SVM_EXIT_VINTR)

	else if (exit_code == SVM_EXIT_INTR)

	else if (exit_code == SVM_EXIT_HLT)

	else if (exit_code == SVM_EXIT_NPF)

			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)

	*info1 = control->exit_info_1;
	*info2 = control->exit_info_2;
	*intr_info = control->exit_int_info;
	if ((*intr_info & SVM_EXITINTINFO_VALID) &&
	    (*intr_info & SVM_EXITINTINFO_VALID_ERR))
		*error_code = control->exit_int_info_err;
	struct kvm_run *kvm_run = vcpu->run;
	u32 exit_code = svm->vmcb->control.exit_code;

		vcpu->arch.cr0 = svm->vmcb->save.cr0;

		vcpu->arch.cr3 = svm->vmcb->save.cr3;

	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		kvm_run->fail_entry.hardware_entry_failure_reason
			= svm->vmcb->control.exit_code;
		kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;

	if (exit_fastpath != EXIT_FASTPATH_NONE)

	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

	++vcpu->stat.nmi_injections;
	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);

	if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)

	svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;

	++vcpu->stat.nmi_injections;

	if (vcpu->arch.interrupt.soft) {

		type = SVM_EVTINJ_TYPE_SOFT;

		type = SVM_EVTINJ_TYPE_INTR;

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
			   vcpu->arch.interrupt.soft, reinjected);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
				       SVM_EVTINJ_VALID | type;
					  int trig_mode, int vector)

	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);

	if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {

		kvm_make_request(KVM_REQ_EVENT, vcpu);

	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
	if (in_guest_mode) {

				  int trig_mode, int vector)

	smp_mb__after_atomic();

	return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;

		svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;

		svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;

	struct vmcb *vmcb = svm->vmcb;

	return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;

	struct vmcb *vmcb = svm->vmcb;

		? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)

	return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;

	hpa_t root_tdp = vcpu->arch.mmu->root.hpa;

		hyperv_flush_guest_mapping(root_tdp);

	int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;

	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;

	bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
	bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);

	else if (!nrips && (is_soft || is_exception) &&
	u32 exitintinfo = svm->vmcb->control.exit_int_info;

		kvm_make_request(KVM_REQ_EVENT, vcpu);

		vcpu->arch.nmi_injected = false;

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	case SVM_EXITINTINFO_TYPE_NMI:
		vcpu->arch.nmi_injected = true;

	case SVM_EXITINTINFO_TYPE_EXEPT:

		if (vector == X86_TRAP_VC)

		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;

	case SVM_EXITINTINFO_TYPE_INTR:

	case SVM_EXITINTINFO_TYPE_SOFT:

	struct vmcb_control_area *control = &svm->vmcb->control;

	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
	    to_svm(vcpu)->vmcb->control.exit_info_1)

	return EXIT_FASTPATH_NONE;
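/*
 * For MSR exits, exit_info_1 is 0 for RDMSR and 1 for WRMSR, so the
 * check above only sends WRMSR exits down the irqoff fastpath (e.g.
 * writes to the x2APIC ICR or the TSC deadline timer); everything else
 * falls through to the normal exit handling path.
 */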
	guest_state_enter_irqoff();

	amd_clear_divider();

	guest_state_exit_irqoff();

	trace_kvm_entry(vcpu);

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

		smp_send_reschedule(vcpu->cpu);

	if (unlikely(svm->asid != svm->vmcb->control.asid)) {
		svm->vmcb->control.asid = svm->asid;

	svm->vmcb->save.cr2 = vcpu->arch.cr2;

	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))

	if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))

	vcpu->arch.cr2 = svm->vmcb->save.cr2;
	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

	vcpu->arch.regs_dirty = 0;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))

		    svm->vmcb->control.exit_code != SVM_EXIT_ERR)
			++vcpu->stat.nested_run;

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;

	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
		vcpu->arch.apf.host_apf_flags =
			kvm_read_and_reset_apf_flags();

	if (unlikely(svm->vmcb->control.exit_code ==
		     SVM_EXIT_EXCP_BASE + MC_VECTOR))

	return EXIT_FASTPATH_NONE;
		svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);

		cr3 = vcpu->arch.cr3;

	svm->vmcb->save.cr3 = cr3;

	hypercall[0] = 0x0f;
	hypercall[1] = 0x01;
	hypercall[2] = 0xd9;

	case MSR_IA32_MCG_EXT_CTL:

	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM))

	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
	    boot_cpu_has(X86_FEATURE_XSAVES) &&

	if (boot_cpu_has(X86_FEATURE_IBPB))

	if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))

#define PRE_EX(exit)  { .exit_code = (exit), \
			.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
			.stage = X86_ICPT_POST_MEMACCESS, }
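/*
 * x86_intercept_map[] pairs each emulated instruction with the SVM exit
 * code L1 would have seen and the emulation stage at which that
 * intercept is architecturally checked: PRE_EX before exception checks,
 * POST_EX after them, and POST_MEM after the memory operand has been
 * accessed.  svm_check_intercept() consults this when emulating on
 * behalf of a nested guest to decide whether to reflect an exit to L1.
 */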
	struct vmcb *vmcb = svm->vmcb;

	if (stage != icpt_info.stage)

	case SVM_EXIT_READ_CR0:

	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;

		if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||

					INTERCEPT_SELECTIVE_CR0)))

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
		val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;

		if (cr0 & X86_CR0_PE)

			icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;

	case SVM_EXIT_READ_DR0:
	case SVM_EXIT_WRITE_DR0:

			vmcb->control.exit_info_1 = 1;

			vmcb->control.exit_info_1 = 0;

	case SVM_EXIT_PAUSE:

	case SVM_EXIT_IOIO: {

			exit_info = ((info->src_val & 0xffff) << 16) |

			exit_info = (info->dst_val & 0xffff) << 16;

			exit_info |= SVM_IOIO_STR_MASK;

			exit_info |= SVM_IOIO_REP_MASK;

		bytes = min(bytes, 4u);

		exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;

		exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);

		vmcb->control.exit_info_1 = exit_info;
		vmcb->control.exit_info_2 = info->next_rip;

	if (static_cpu_has(X86_FEATURE_NRIPS))
		vmcb->control.next_rip  = info->next_rip;

	vmcb->control.exit_code = icpt_info.exit_code;

	if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
		vcpu->arch.at_instruction_boundary = true;
	vcpu->arch.mcg_cap &= 0x1ff;

#ifdef CONFIG_KVM_SMM

static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)

static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)

	struct kvm_host_map map_save;

	smram->smram64.svm_guest_flag = 1;

	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)

	struct kvm_host_map map, map_save;
	struct vmcb *vmcb12;

	const struct kvm_smram_state_64 *smram64 = &smram->smram64;

	if (!smram64->svm_guest_flag)

	if (!(smram64->efer & EFER_SVME))

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))

static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
					void *insn, int insn_len)

	bool smep, smap, is_user;

	WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
				  EMULTYPE_TRAP_UD_FORCED |
				  EMULTYPE_VMWARE_GP));

	if (emul_type & EMULTYPE_NO_DECODE)

	if (unlikely(!insn)) {
		if (emul_type & EMULTYPE_SKIP)

	if (likely(insn_len))

	if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))

	if (smap && (!smep || is_user)) {
		pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");

		kvm_inject_gp(vcpu, 0);

		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);

		kvm->arch.pause_in_guest = true;
static struct kvm_x86_ops svm_x86_ops __initdata = {
	.name = KBUILD_MODNAME,

	.vm_size = sizeof(struct kvm_svm),

	.update_emulated_instruction = NULL,

#ifdef CONFIG_KVM_SMM
	.smi_allowed = svm_smi_allowed,
	.enter_smm = svm_enter_smm,
	.leave_smm = svm_leave_smm,
	.enable_smi_window = svm_enable_smi_window,
	unsigned int enc_bit, mask_bit;

	if (cpuid_eax(0x80000000) < 0x8000001f)

	rdmsrl(MSR_AMD64_SYSCFG, msr);
	if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	if (enc_bit == mask_bit)

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))

	if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))

	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))

	if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
		kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
	struct page *iopm_pages;

	unsigned int order = get_order(IOPM_SIZE);

	if (!boot_cpu_has(X86_FEATURE_NX)) {
		pr_err_ratelimited("NX (Execute Disable) not supported\n");

	iopm_pages = alloc_pages(GFP_KERNEL, order);

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

					      XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))

	if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {

		pr_info("TSC scaling supported\n");

	if (boot_cpu_has(X86_FEATURE_AUTOIBRS))

	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {

	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {

		pr_info("Nested Virtualization enabled\n");

	if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))

	if (!boot_cpu_has(X86_FEATURE_NPT))

	pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);

	for_each_possible_cpu(cpu) {

		svm_x86_ops.vcpu_blocking = NULL;
		svm_x86_ops.vcpu_unblocking = NULL;
		svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;

		svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;

	    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
	    !IS_ENABLED(CONFIG_X86_64)) {

		pr_info("Virtual VMLOAD VMSAVE supported\n");

	if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))

	if (!boot_cpu_has(X86_FEATURE_VGIF))

		pr_info("Virtual GIF supported\n");

		pr_info("Virtual NMI enabled\n");

		svm_x86_ops.is_vnmi_pending = NULL;
		svm_x86_ops.set_vnmi_pending = NULL;

	if (!boot_cpu_has(X86_FEATURE_LBRV))

		pr_info("LBR virtualization supported\n");

		pr_info("PMU virtualization is disabled\n");
static struct kvm_x86_init_ops svm_init_ops __initdata = {
	.runtime_ops = &svm_x86_ops,

	__unused_size_checks();
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
int avic_vm_init(struct kvm *kvm)
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set)
void avic_vm_destroy(struct kvm *kvm)
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
void avic_vcpu_put(struct kvm_vcpu *vcpu)
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
int avic_init_vcpu(struct vcpu_svm *svm)
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
bool avic_hardware_setup(void)
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
void kvm_set_cpu_caps(void)
static int guest_cpuid_model(struct kvm_vcpu *vcpu)
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
static bool guest_has_pred_cmd_msr(struct kvm_vcpu *vcpu)
static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool guest_has_spec_ctrl_msr(struct kvm_vcpu *vcpu)
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static int guest_cpuid_family(struct kvm_vcpu *vcpu)
static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
static void kvm_hv_vcpu_purge_flush_tlb(struct kvm_vcpu *vcpu)
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
static void kvm_register_mark_available(struct kvm_vcpu *vcpu, enum kvm_reg reg)
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static bool is_guest_mode(struct kvm_vcpu *vcpu)
#define X86EMUL_PROPAGATE_FAULT
#define X86EMUL_UNHANDLEABLE
#define X86EMUL_INTERCEPTED
#define X86EMUL_RETRY_INSTR
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
int hv_flush_remote_tlbs(struct kvm *kvm)
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
static bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
static void kvm_lapic_set_irr(int vec, struct kvm_lapic *apic)
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
static bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len)
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len)
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, int tdp_max_root_level, int tdp_huge_page_level)
static __always_inline u64 rsvd_bits(int s, int e)
static unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
struct x86_pmu_capability __read_mostly kvm_pmu_cap
struct kvm_pmu_ops amd_pmu_ops
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
void sev_hardware_unsetup(void)
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
unsigned int max_sev_asid
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
int sev_cpu_init(struct svm_cpu_data *sd)
int sev_mem_enc_unregister_region(struct kvm *kvm, struct kvm_enc_region *range)
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
void __init sev_hardware_setup(void)
void sev_es_vcpu_reset(struct vcpu_svm *svm)
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
int sev_mem_enc_register_region(struct kvm *kvm, struct kvm_enc_region *range)
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void sev_init_vmcb(struct vcpu_svm *svm)
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
void sev_guest_memory_reclaimed(struct kvm *kvm)
void __init sev_set_cpu_caps(void)
void sev_vm_destroy(struct kvm *kvm)
void sev_free_vcpu(struct kvm_vcpu *vcpu)
static bool is_smm(struct kvm_vcpu *vcpu)
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
enum x86_intercept_stage stage
u64 default_tsc_scaling_ratio
u64 max_tsc_scaling_ratio
u8 tsc_scaling_ratio_frac_bits
struct vmcb * current_vmcb
unsigned long save_area_pa
struct vmcb_ctrl_area_cached ctl
bool force_msr_bitmap_recalc
struct sev_es_save_area * vmsa
struct kvm_vmcb_info * current_vmcb
unsigned long soft_int_old_rip
unsigned long soft_int_next_rip
unsigned long soft_int_csbase
u64 nmi_singlestep_guest_rflags
struct svm_nested_state nested
struct vcpu_svm::@33 shadow_msr_intercept
struct vcpu_sev_es_state sev_es
bool awaiting_iret_completion
struct kvm_vmcb_info vmcb01
bool x2avic_msrs_intercepted
struct kvm_x86_nested_ops svm_nested_ops
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
void svm_free_nested(struct vcpu_svm *svm)
int nested_svm_exit_handled(struct vcpu_svm *svm)
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, struct vmcb *vmcb12, bool from_vmrun)
int svm_allocate_nested(struct vcpu_svm *svm)
int nested_svm_exit_special(struct vcpu_svm *svm)
void recalc_intercepts(struct vcpu_svm *svm)
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control)
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, struct vmcb_save_area *save)
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
void svm_copy_vmrun_state(struct vmcb_save_area *to_save, struct vmcb_save_area *from_save)
void svm_leave_nested(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static void clr_dr_intercepts(struct vcpu_svm *svm)
static int task_switch_interception(struct kvm_vcpu *vcpu)
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu, unsigned long val)
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int stgi_interception(struct kvm_vcpu *vcpu)
static int svm_instr_opcode(struct kvm_vcpu *vcpu)
static void init_msrpm_offsets(void)
static bool __kvm_is_svm_supported(void)
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
static void svm_vm_destroy(struct kvm *kvm)
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, struct vcpu_svm *svm)
static void svm_inject_exception(struct kvm_vcpu *vcpu)
static void pre_svm_run(struct kvm_vcpu *vcpu)
module_param_named(npt, npt_enabled, bool, 0444)
static DEFINE_PER_CPU(u64, current_tsc_ratio)
static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
static unsigned long iopm_base
static int msr_interception(struct kvm_vcpu *vcpu)
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
static int get_npt_level(void)
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu, bool commit_side_effects)
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, int trig_mode, int vector)
static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, int write)
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static int intr_interception(struct kvm_vcpu *vcpu)
static int vmload_interception(struct kvm_vcpu *vcpu)
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
static int invlpga_interception(struct kvm_vcpu *vcpu)
static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
static void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
static bool svm_gp_erratum_intercept
static void svm_set_vintr(struct vcpu_svm *svm)
static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type, void *insn, int insn_len)
static int skinit_interception(struct kvm_vcpu *vcpu)
static int svm_vcpu_create(struct kvm_vcpu *vcpu)
static void init_seg(struct vmcb_seg *seg)
static int shutdown_interception(struct kvm_vcpu *vcpu)
static void svm_clear_current_vmcb(struct vmcb *vmcb)
static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static void svm_setup_mce(struct kvm_vcpu *vcpu)
u32 svm_msrpm_offset(u32 msr)
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
static void svm_vcpu_free(struct kvm_vcpu *vcpu)
static unsigned short pause_filter_count
static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
static bool valid_msr_intercept(u32 index)
static int clgi_interception(struct kvm_vcpu *vcpu)
static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, int trig_mode, int vector)
static const struct __x86_intercept x86_intercept_map[]
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
static void kvm_cpu_svm_disable(void)
static int nmi_interception(struct kvm_vcpu *vcpu)
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
static int pf_interception(struct kvm_vcpu *vcpu)
static int emulate_on_interception(struct kvm_vcpu *vcpu)
static void __svm_exit(void)
void svm_set_gif(struct vcpu_svm *svm, bool value)
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
static int vmrun_interception(struct kvm_vcpu *vcpu)
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
static void svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
static void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static unsigned short pause_filter_count_grow
static bool erratum_383_found __read_mostly
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
static void __svm_write_tsc_multiplier(u64 multiplier)
static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
bool __read_mostly dump_invalid_vmcb
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
void svm_vcpu_free_msrpm(u32 *msrpm)
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
static int invpcid_interception(struct kvm_vcpu *vcpu)
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static u8 rsm_ins_bytes[]
static int direct_access_msr_slot(u32 msr)
static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
static void svm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
static int npf_interception(struct kvm_vcpu *vcpu)
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
static int io_interception(struct kvm_vcpu *vcpu)
static unsigned short pause_filter_thresh
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
static int gp_interception(struct kvm_vcpu *vcpu)
static void svm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
static void set_dr_intercepts(struct vcpu_svm *svm)
static void svm_init_osvw(struct kvm_vcpu *vcpu)
void disable_nmi_singlestep(struct vcpu_svm *svm)
static int vmsave_interception(struct kvm_vcpu *vcpu)
static void add_msr_offset(u32 offset)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static void svm_init_erratum_383(void)
static int dr_interception(struct kvm_vcpu *vcpu)
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
MODULE_AUTHOR("Qumranet")
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
module_param(pause_filter_thresh, ushort, 0444)
static bool svm_has_wbinvd_exit(void)
static int ud_interception(struct kvm_vcpu *vcpu)
static void svm_hardware_unsetup(void)
static int mc_interception(struct kvm_vcpu *vcpu)
static int rsm_interception(struct kvm_vcpu *vcpu)
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
static void init_vmcb(struct kvm_vcpu *vcpu)
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
static __init int svm_hardware_setup(void)
static int __init svm_init(void)
static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
static int iret_interception(struct kvm_vcpu *vcpu)
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
static void shrink_ple_window(struct kvm_vcpu *vcpu)
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
u32 *svm_vcpu_alloc_msrpm(void)
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
static __init void svm_set_cpu_caps(void)
static void grow_ple_window(struct kvm_vcpu *vcpu)
static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
static void svm_emergency_disable(void)
static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
static unsigned short pause_filter_count_max
static bool is_erratum_383(void)
static struct kvm_x86_ops svm_x86_ops __initdata
static void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
static int efer_trap(struct kvm_vcpu *vcpu)
static void svm_hardware_disable(void)
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
static int svm_hardware_enable(void)
static int cr_interception(struct kvm_vcpu *vcpu)
static bool kvm_is_svm_supported(void)
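/*
 * Illustrative sketch (not the kernel's implementation): detecting SVM
 * support from user space with the CPUID instruction.  CPUID leaf
 * 0x8000_0001 reports SVM in ECX bit 2; kvm_is_svm_supported() also
 * checks the CPU vendor and that the BIOS has not locked SVM out via
 * the VM_CR MSR, which cannot be done from user space.  The names here
 * are local to the example; <cpuid.h> provides __get_cpuid().
 */
#if 0	/* standalone example, kept out of the build */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool example_cpu_has_svm(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Extended leaf 0x80000001: ECX bit 2 advertises SVM. */
	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return false;

	return ecx & (1u << 2);
}

int main(void)
{
	printf("SVM advertised by CPUID: %s\n",
	       example_cpu_has_svm() ? "yes" : "no");
	return 0;
}
#endif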
void svm_update_lbrv(struct kvm_vcpu *vcpu)
static int pause_interception(struct kvm_vcpu *vcpu)
static unsigned short pause_filter_count_shrink
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
static const struct svm_direct_access_msrs direct_access_msrs[MAX_DIRECT_ACCESS_MSRS]
static uint64_t osvw_status
static int svm_cpu_init(int cpu)
static bool svm_check_exit_valid(u64 exit_code)
static int svm_check_processor_compat(void)
static int svm_vm_init(struct kvm *kvm)
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
static int invlpg_interception(struct kvm_vcpu *vcpu)
static int smi_interception(struct kvm_vcpu *vcpu)
static void dump_vmcb(struct kvm_vcpu *vcpu)
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector, int type)
static void svm_cpu_uninit(int cpu)
static int svm_check_intercept(struct kvm_vcpu *vcpu, struct x86_instruction_info *info, enum x86_intercept_stage stage, struct x86_exception *exception)
static int ac_interception(struct kvm_vcpu *vcpu)
static void __exit svm_exit(void)
static int db_interception(struct kvm_vcpu *vcpu)
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
static __init void svm_adjust_mmio_mask(void)
static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
static void svm_clear_vintr(struct vcpu_svm *svm)
static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
static int svm_get_cpl(struct kvm_vcpu *vcpu)
static const u32 msrpm_ranges[]
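/*
 * A minimal sketch of how an MSR number maps to a bit pair in the MSR
 * permission map used by svm_msrpm_offset() and the interception
 * helpers.  Assumptions, per the AMD architecture: the map covers three
 * ranges of 0x2000 MSRs each (starting at 0x0, 0xc0000000 and
 * 0xc0010000), every MSR owns two consecutive bits (read intercept,
 * then write intercept), so each range occupies 0x2000 * 2 / 8 = 2048
 * bytes.  The helper names below are local to this example, not kernel
 * symbols.
 */
#if 0	/* standalone example, kept out of the build */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MSR_INVALID	0xffffffffu

static const uint32_t example_ranges[] = { 0x00000000u, 0xc0000000u, 0xc0010000u };

/* Return the byte offset of @msr's bit pair inside the permission map. */
static uint32_t example_msrpm_byte(uint32_t msr)
{
	for (unsigned int i = 0; i < 3; i++) {
		if (msr < example_ranges[i] || msr >= example_ranges[i] + 0x2000u)
			continue;
		/* 4 MSRs per byte within a range; ranges are 2048 bytes apart. */
		return (msr - example_ranges[i]) / 4 + i * 2048u;
	}
	return EXAMPLE_MSR_INVALID;	/* MSR not covered by the map */
}

int main(void)
{
	uint32_t msr = 0xc0000080u;	/* MSR_EFER, for illustration */
	uint32_t byte = example_msrpm_byte(msr);

	/* Bit pair inside that byte: bit 2*(msr % 4) is read, +1 is write. */
	printf("MSR %#x -> byte %u, read bit %u\n", msr, byte, 2 * (msr & 3));
	return 0;
}
#endif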
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
static void svm_handle_mce(struct kvm_vcpu *vcpu)
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
static int cr8_write_interception(struct kvm_vcpu *vcpu)
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
static void svm_set_iret_intercept(struct vcpu_svm *svm)
static int cr_trap(struct kvm_vcpu *vcpu)
static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu)
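/*
 * svm_exit_handlers[] is a table indexed by the hardware exit code;
 * svm_check_exit_valid() and svm_invoke_exit_handler() look up and call
 * the entry.  A minimal sketch of that table-dispatch pattern follows;
 * the exit-code values and handler bodies are illustrative stand-ins,
 * not the real handlers.
 */
#if 0	/* standalone example, kept out of the build */
#include <stdio.h>

struct example_vcpu { unsigned long rip; };

static int example_handle_cpuid(struct example_vcpu *v) { (void)v; puts("cpuid"); return 1; }
static int example_handle_hlt(struct example_vcpu *v)   { (void)v; puts("hlt");   return 1; }

/* Sparse table: unhandled exit codes stay NULL and are rejected. */
static int (*const example_handlers[])(struct example_vcpu *) = {
	[0x72] = example_handle_cpuid,
	[0x78] = example_handle_hlt,
};

static int example_invoke(struct example_vcpu *v, unsigned int exit_code)
{
	if (exit_code >= sizeof(example_handlers) / sizeof(example_handlers[0]) ||
	    !example_handlers[exit_code])
		return -1;	/* unknown exit: bail out to user space */
	return example_handlers[exit_code](v);
}

int main(void)
{
	struct example_vcpu v = { 0 };

	return example_invoke(&v, 0x78) == 1 ? 0 : 1;
}
#endif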
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason, u64 *info1, u64 *info2, u32 *intr_info, u32 *error_code)
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
static void svm_clr_iret_intercept(struct vcpu_svm *svm)
static int bp_interception(struct kvm_vcpu *vcpu)
static bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
static void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
static bool nested_exit_on_intr(struct vcpu_svm *svm)
static __always_inline bool sev_es_guest(struct kvm *kvm)
static __always_inline bool sev_guest(struct kvm *kvm)
static bool svm_is_intercept(struct vcpu_svm *svm, int bit)
#define SVM_REGS_LAZY_LOAD_SET
static void svm_clr_intercept(struct vcpu_svm *svm, int bit)
static void vmcb_mark_all_dirty(struct vmcb *vmcb)
#define MAX_DIRECT_ACCESS_MSRS
static bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
static bool nested_exit_on_smi(struct vcpu_svm *svm)
static void vmcb_mark_all_clean(struct vmcb *vmcb)
static bool is_vnmi_enabled(struct vcpu_svm *svm)
static void disable_gif(struct vcpu_svm *svm)
static int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
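/*
 * to_svm() follows the usual container_of() pattern: the generic
 * struct kvm_vcpu is embedded inside the vendor-specific struct
 * vcpu_svm, so the wrapper is recovered by subtracting the member
 * offset.  A minimal, self-contained sketch of that pattern (types and
 * names are local to this example):
 */
#if 0	/* standalone example, kept out of the build */
#include <stddef.h>
#include <stdio.h>

#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_vcpu { int id; };
struct example_vcpu_svm {
	struct example_vcpu vcpu;	/* embedded generic part */
	int asid;			/* vendor-specific state */
};

int main(void)
{
	struct example_vcpu_svm svm = { .vcpu = { .id = 1 }, .asid = 5 };
	struct example_vcpu *generic = &svm.vcpu;

	printf("asid = %d\n",
	       example_container_of(generic, struct example_vcpu_svm, vcpu)->asid);
	return 0;
}
#endif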
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
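/*
 * The vmcb_mark_*() helpers manage the VMCB "clean bits": a bitmask in
 * the control area telling the CPU which cached VMCB fields are still
 * valid, so a VMRUN can skip reloading unchanged state.  Marking a
 * field dirty clears its bit; marking everything dirty clears the whole
 * mask.  A minimal sketch of that bookkeeping, assuming a simplified
 * control area with a 32-bit clean field:
 */
#if 0	/* standalone example, kept out of the build */
#include <stdint.h>
#include <stdio.h>

struct example_vmcb_control { uint32_t clean; };

/* Force the CPU to reload every cached field on the next VMRUN. */
static void example_mark_all_dirty(struct example_vmcb_control *c)
{
	c->clean = 0;
}

/* Invalidate only the field guarded by @bit. */
static void example_mark_dirty(struct example_vmcb_control *c, int bit)
{
	c->clean &= ~(1u << bit);
}

int main(void)
{
	struct example_vmcb_control c = { .clean = ~0u };

	example_mark_dirty(&c, 3);	/* e.g. the intercept vectors changed */
	printf("clean = %#x\n", c.clean);
	example_mark_all_dirty(&c);
	printf("clean = %#x\n", c.clean);
	return 0;
}
#endif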
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
#define NESTED_EXIT_CONTINUE
#define DEBUGCTL_RESERVED_BITS
static void enable_gif(struct vcpu_svm *svm)
static bool gif_set(struct vcpu_svm *svm)
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
static void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
#define AVIC_REQUIRED_APICV_INHIBITS
static void svm_set_intercept(struct vcpu_svm *svm, int bit)
static void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
static __init void svm_hv_hardware_setup(void)
static void svm_hv_init_vmcb(struct vmcb *vmcb)
static bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
static void svm_hv_vmcb_dirty_nested_enlightenments(struct kvm_vcpu *vcpu)
static void svm_hv_update_vp_id(struct vmcb *vmcb, struct kvm_vcpu *vcpu)
static __always_inline void vmsave(unsigned long pa)
static void invlpga(unsigned long addr, u32 asid)
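/*
 * vmsave() and invlpga() are thin inline-assembly wrappers around the
 * corresponding SVM instructions: VMSAVE stores additional host/guest
 * state to the VMCB whose physical address is in rAX, and INVLPGA
 * invalidates the TLB mapping for the virtual address in rAX within
 * the ASID passed in ECX.
 */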
#define trace_kvm_cr_read(cr, val)
#define trace_kvm_cr_write(cr, val)
bool __read_mostly allow_smaller_maxphyaddr
int kvm_spec_ctrl_test_value(u64 value)
bool __read_mostly enable_apicv
int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, void *insn, int insn_len)
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, int reason, bool has_error_code, u32 error_code)
bool __read_mostly enable_vmware_backdoor
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, void *insn, int insn_len)
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
void kvm_x86_vendor_exit(void)
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
bool __read_mostly enable_pmu
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu, struct kvm_queued_exception *ex)
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
int kvm_add_user_return_msr(u32 msr)
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
int kvm_emulate_invd(struct kvm_vcpu *vcpu)
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
int handle_ud(struct kvm_vcpu *vcpu)
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload)
void kvm_enable_efer_bits(u64 mask)
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
static unsigned int __grow_ple_window(unsigned int val, unsigned int base, unsigned int modifier, unsigned int max)
static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
static bool kvm_exception_is_soft(unsigned int nr)
#define KVM_SVM_DEFAULT_PLE_WINDOW
#define KVM_FIRST_EMULATED_VMX_MSR
#define KVM_DEFAULT_PLE_WINDOW_SHRINK
static bool is_paging(struct kvm_vcpu *vcpu)
static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu, enum kvm_intr_type intr)
static void kvm_register_write(struct kvm_vcpu *vcpu, int reg, unsigned long val)
static bool kvm_mwait_in_guest(struct kvm *kvm)
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX
#define KVM_DEFAULT_PLE_WINDOW_GROW
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
static unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
static void kvm_machine_check(void)
static void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, bool soft)
static bool is_long_mode(struct kvm_vcpu *vcpu)
static bool kvm_pause_in_guest(struct kvm *kvm)
#define KVM_MSR_RET_INVALID
#define KVM_DEFAULT_PLE_GAP
static void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
static bool kvm_hlt_in_guest(struct kvm *kvm)
#define KVM_LAST_EMULATED_VMX_MSR
static bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
static unsigned int __shrink_ple_window(unsigned int val, unsigned int base, unsigned int modifier, unsigned int min)
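/*
 * __grow_ple_window() and __shrink_ple_window() adjust the pause-loop
 * exiting window: the modifier acts as a multiplier/divisor when it is
 * smaller than the base value and as an additive/subtractive step
 * otherwise, and the result is clamped to the given bound.  A minimal
 * sketch of that arithmetic (a simplification of the helpers, not a
 * verbatim copy):
 */
#if 0	/* standalone example, kept out of the build */
#include <stdint.h>
#include <stdio.h>

static unsigned int example_grow(unsigned int val, unsigned int base,
				 unsigned int modifier, unsigned int max)
{
	uint64_t ret = val;

	if (modifier < 1)
		return base;
	if (modifier < base)
		ret *= modifier;	/* small modifier: multiplicative growth */
	else
		ret += modifier;	/* large modifier: additive growth */
	return ret > max ? max : (unsigned int)ret;
}

static unsigned int example_shrink(unsigned int val, unsigned int base,
				   unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;
	if (modifier < base)
		val /= modifier;	/* small modifier: divide */
	else
		val -= modifier;	/* large modifier: subtract */
	return val < min ? min : val;
}

int main(void)
{
	unsigned int w = 4096;

	w = example_grow(w, 4096, 2, 65536);	/* doubles to 8192 */
	w = example_shrink(w, 4096, 2, 4096);	/* halves back to 4096 */
	printf("window = %u\n", w);
	return 0;
}
#endif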