15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
34 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
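/*
 * nested_svm_inject_npf_exit(): reflect a nested page fault taken while L2
 * was running as an SVM_EXIT_NPF vmexit to L1.  The high 32 bits of
 * exit_info_1 are a placeholder because the real NPF cause is not tracked;
 * the low bits carry the fault's error code.
 */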
40 struct vmcb *vmcb = svm->vmcb;
42 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
47 vmcb->control.exit_code = SVM_EXIT_NPF;
48 vmcb->control.exit_code_hi = 0;
49 vmcb->control.exit_info_1 = (1ULL << 32);
50 vmcb->control.exit_info_2 = fault->address;
53 vmcb->control.exit_info_1 &= ~0xffffffffULL;
54 vmcb->control.exit_info_1 |= fault->error_code;
67 offset_in_page(cr3) + index * 8, 8);
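/*
 * MMU context switching for nested NPT: on VMRUN, vcpu->arch.mmu points at
 * guest_mmu (used while L2 runs) and walk_mmu at nested_mmu; on nested
 * vmexit both are reset to root_mmu.
 */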
86 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
99 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
104 vcpu->arch.mmu = &vcpu->arch.root_mmu;
105 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
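/*
 * recalc_intercepts(): merge KVM's own intercepts (vmcb01) with L1's
 * intercepts for L2 (the cached vmcb12 controls) into the active vmcb02.
 * When L1 sets V_INTR_MASKING, the CR8-write and interrupt-window
 * intercepts are adjusted based on L1's saved RFLAGS.IF.
 */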
124 struct vmcb_control_area *c, *h;
133 c = &svm->vmcb->control;
137 for (i = 0; i < MAX_INTERCEPT; i++)
140 if (g->int_ctl & V_INTR_MASKING_MASK) {
152 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
163 for (i = 0; i < MAX_INTERCEPT; i++)
179 WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
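/*
 * nested_svm_vmrun_msrpm(): build the merged MSR permission bitmap for L2
 * from L1's msrpm and KVM's, walking the precomputed msrpm_offsets list.
 * With Hyper-V enlightenments, the merge can be skipped when L1 reports its
 * enlightened MSR bitmap as clean.
 */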
200 #ifdef CONFIG_KVM_HYPERV
202 struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;
205 hve->hv_enlightenments_control.msr_bitmap &&
207 goto set_msrpm_base_pa;
218 if (msrpm_offsets[i] == 0xffffffff)
221 p = msrpm_offsets[i];
237 #ifdef CONFIG_KVM_HYPERV
250 u64 addr = PAGE_ALIGN(pa);
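/*
 * __nested_vmcb_check_controls()/__nested_vmcb_check_save(): the VMRUN
 * consistency checks.  Among others: the permission bitmap addresses must
 * be valid, EFER.SVME must be set in vmcb12, CR0.CD=0 with CR0.NW=1 is
 * illegal, CR0's upper 32 bits must be clear, and long mode requires
 * CR4.PAE and CR0.PE.
 */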
275 if (CC((control->int_ctl & V_NMI_ENABLE_MASK) &&
287 if (CC(!(save->efer & EFER_SVME)))
290 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
291 CC(save->cr0 & ~0xffffffffULL))
302 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
303 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
304 CC(!(save->cr0 & X86_CR0_PE)) ||
338 struct vmcb_control_area *from)
342 for (i = 0; i < MAX_INTERCEPT; i++)
368 to->asid = from->asid;
372 #ifdef CONFIG_KVM_HYPERV
375 to->clean = from->clean;
376 memcpy(&to->hv_enlightenments, &from->hv_enlightenments,
377 sizeof(to->hv_enlightenments));
383 struct vmcb_control_area *control)
389 struct vmcb_save_area *from)
395 to->efer = from->efer;
405 struct vmcb_save_area *save)
421 mask = V_IRQ_MASK | V_TPR_MASK;
438 mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;
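/*
 * nested_save_pending_event_to_vmcb12(): an exception, NMI or interrupt
 * that was injected but not yet delivered when the nested vmexit happens is
 * recorded in vmcb12's exit_int_info (plus exit_int_info_err for error
 * codes) so that L1 can re-inject it.
 */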
451 struct kvm_vcpu *vcpu = &svm->vcpu;
452 u32 exit_int_info = 0;
455 if (vcpu->arch.exception.injected) {
456 nr = vcpu->arch.exception.vector;
457 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
459 if (vcpu->arch.exception.has_error_code) {
460 exit_int_info |= SVM_EVTINJ_VALID_ERR;
461 vmcb12->control.exit_int_info_err =
462 vcpu->arch.exception.error_code;
465 } else if (vcpu->arch.nmi_injected) {
466 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
468 } else if (vcpu->arch.interrupt.injected) {
469 nr = vcpu->arch.interrupt.nr;
470 exit_int_info = nr | SVM_EVTINJ_VALID;
472 if (vcpu->arch.interrupt.soft)
473 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
475 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
478 vmcb12->control.exit_int_info = exit_int_info;
499 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
500 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
508 bool nested_npt, bool reload_pdptrs)
517 vcpu->arch.cr3 = cr3;
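/*
 * nested_vmcb02_prepare_save(): load L2's guest state from vmcb12 into
 * vmcb02, the VMCB that is actually executed: segments, descriptor tables,
 * CPL, CR2 and RAX/RSP/RIP are taken directly from vmcb12.
 */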
539 bool new_vmcb12 = false;
542 struct kvm_vcpu *vcpu = &svm->vcpu;
554 vmcb02->save.es = vmcb12->save.es;
555 vmcb02->save.cs = vmcb12->save.cs;
556 vmcb02->save.ss = vmcb12->save.ss;
557 vmcb02->save.ds = vmcb12->save.ds;
558 vmcb02->save.cpl = vmcb12->save.cpl;
563 vmcb02->save.gdtr = vmcb12->save.gdtr;
564 vmcb02->save.idtr = vmcb12->save.idtr;
575 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
577 kvm_rax_write(vcpu, vmcb12->save.rax);
582 vmcb02->save.rax = vmcb12->save.rax;
583 vmcb02->save.rsp = vmcb12->save.rsp;
584 vmcb02->save.rip = vmcb12->save.rip;
603 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
610 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
611 u8 vector = evtinj & SVM_EVTINJ_VEC_MASK;
613 if (!(evtinj & SVM_EVTINJ_VALID))
616 if (type == SVM_EVTINJ_TYPE_SOFT)
624 u32 type = evtinj & SVM_EVTINJ_TYPE_MASK;
626 if (!(evtinj & SVM_EVTINJ_VALID))
629 return type == SVM_EVTINJ_TYPE_NMI;
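/*
 * nested_vmcb02_prepare_control(): build vmcb02's control area.  int_ctl is
 * assembled from L1-owned bits in vmcb01 (V_INTR_MASKING, plus the vGIF and
 * vNMI bits depending on whether L1 uses those features for L2) and
 * L2-owned bits in vmcb12 (V_TPR, interrupt injection).  nested_ctl and the
 * IOPM/MSRPM base addresses stay under KVM's control, and the TSC offset is
 * the combined L1+L2 offset.
 */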
633 unsigned long vmcb12_rip,
634 unsigned long vmcb12_csbase)
636 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
637 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
639 struct kvm_vcpu *vcpu = &svm->vcpu;
652 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
654 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
657 if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
658 svm->vcpu.arch.nmi_pending++;
659 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
662 int_ctl_vmcb12_bits |= (V_NMI_PENDING_MASK |
664 V_NMI_BLOCKING_MASK);
668 vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
669 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
670 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
675 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
682 vcpu->arch.l1_tsc_offset,
686 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
692 vmcb02->control.int_ctl =
694 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
711 else if (boot_cpu_has(X86_FEATURE_NRIPS))
712 vmcb02->control.next_rip = vmcb12_rip;
725 vmcb02->control.virt_ext = vmcb01->control.virt_ext &
728 vmcb02->control.virt_ext |=
732 vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
744 vmcb02->control.pause_filter_count = pause_count12;
745 vmcb02->control.pause_filter_thresh = pause_thresh12;
749 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
750 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
755 vmcb02->control.pause_filter_count = 0;
757 vmcb02->control.pause_filter_thresh = 0;
782 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
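/*
 * enter_svm_guest_mode(): the L1 -> L2 world switch.  Trace the VMRUN,
 * switch to vmcb02, prepare its control and save areas from the cached
 * vmcb12 state, and load L2's CR3.
 */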
786 struct vmcb *vmcb12, bool from_vmrun)
791 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
794 vmcb12->control.int_ctl,
795 vmcb12->control.event_inj,
796 vmcb12->control.nested_ctl,
797 vmcb12->control.nested_cr3,
801 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
802 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
803 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
804 vmcb12->control.intercepts[INTERCEPT_WORD3],
805 vmcb12->control.intercepts[INTERCEPT_WORD4],
806 vmcb12->control.intercepts[INTERCEPT_WORD5]);
825 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
830 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
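/*
 * nested_svm_vmrun(): the VMRUN intercept handler.  vmcb12's GPA comes from
 * RAX; a GPA that cannot be mapped injects #GP into L1, invalid vmcb12
 * state is reported back with exit_code SVM_EXIT_ERR, and L1's host state
 * is saved into vmcb01 before entering guest mode.
 */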
842 struct kvm_host_map map;
847 kvm_inject_gp(vcpu, 0);
859 kvm_inject_gp(vcpu, 0);
863 vmcb12_gpa = svm->vmcb->save.rax;
865 if (ret == -EINVAL) {
866 kvm_inject_gp(vcpu, 0);
884 vmcb12->control.exit_code = SVM_EXIT_ERR;
885 vmcb12->control.exit_code_hi = 0;
886 vmcb12->control.exit_info_1 = 0;
887 vmcb12->control.exit_info_2 = 0;
895 vmcb01->save.efer = vcpu->arch.efer;
897 vmcb01->save.cr4 = vcpu->arch.cr4;
917 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
918 svm->vmcb->control.exit_code_hi = 0;
919 svm->vmcb->control.exit_info_1 = 0;
920 svm->vmcb->control.exit_info_2 = 0;
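/*
 * svm_copy_vmrun_state() copies the register state that VMRUN saves and
 * #VMEXIT restores for the host; svm_copy_vmloadsave_state() below copies
 * the state handled by VMLOAD/VMSAVE (FS/GS/TR/LDTR, KERNEL_GS_BASE, the
 * SYSCALL and SYSENTER MSRs).
 */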
932 struct vmcb_save_area *from_save)
934 to_save->es = from_save->es;
935 to_save->cs = from_save->cs;
936 to_save->ss = from_save->ss;
937 to_save->ds = from_save->ds;
938 to_save->gdtr = from_save->gdtr;
939 to_save->idtr = from_save->idtr;
940 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
941 to_save->efer = from_save->efer;
942 to_save->cr0 = from_save->cr0;
943 to_save->cr3 = from_save->cr3;
944 to_save->cr4 = from_save->cr4;
945 to_save->rax = from_save->rax;
946 to_save->rsp = from_save->rsp;
947 to_save->rip = from_save->rip;
953 to_vmcb->save.fs = from_vmcb->save.fs;
954 to_vmcb->save.gs = from_vmcb->save.gs;
955 to_vmcb->save.tr = from_vmcb->save.tr;
956 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
957 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
958 to_vmcb->save.star = from_vmcb->save.star;
959 to_vmcb->save.lstar = from_vmcb->save.lstar;
960 to_vmcb->save.cstar = from_vmcb->save.cstar;
961 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
962 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
963 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
964 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
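/*
 * nested_svm_vmexit(): the L2 -> L1 world switch.  L2's state in vmcb02 and
 * the exit code/info are copied back into the guest's vmcb12, pending vNMI
 * state and pause-filter counts are propagated to vmcb01, the L1 TSC offset
 * and scaling ratio are restored, and execution resumes on vmcb01.
 */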
969 struct kvm_vcpu *vcpu = &svm->vcpu;
973 struct kvm_host_map map;
979 kvm_inject_gp(vcpu, 0);
990 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
993 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
997 vmcb12->save.es = vmcb02->save.es;
998 vmcb12->save.cs = vmcb02->save.cs;
999 vmcb12->save.ss = vmcb02->save.ss;
1000 vmcb12->save.ds = vmcb02->save.ds;
1001 vmcb12->save.gdtr = vmcb02->save.gdtr;
1002 vmcb12->save.idtr = vmcb02->save.idtr;
1003 vmcb12->save.efer = svm->vcpu.arch.efer;
1006 vmcb12->save.cr2 = vmcb02->save.cr2;
1007 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1011 vmcb12->save.rax = kvm_rax_read(vcpu);
1012 vmcb12->save.dr7 = vmcb02->save.dr7;
1013 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1014 vmcb12->save.cpl = vmcb02->save.cpl;
1016 vmcb12->control.int_state = vmcb02->control.int_state;
1017 vmcb12->control.exit_code = vmcb02->control.exit_code;
1018 vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1019 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1020 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1022 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
1026 vmcb12->control.next_rip = vmcb02->control.next_rip;
1033 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1062 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1068 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1074 if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1075 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1077 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1079 if (vcpu->arch.nmi_pending) {
1080 vcpu->arch.nmi_pending--;
1081 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1083 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
1092 vmcb01->control.exit_int_info = 0;
1094 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1095 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1096 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1101 vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1102 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1115 kvm_rax_write(vcpu, vmcb01->save.rax);
1119 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1122 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1123 vmcb12->control.exit_info_1,
1124 vmcb12->control.exit_info_2,
1125 vmcb12->control.exit_int_info,
1126 vmcb12->control.exit_int_info_err,
1143 svm->vcpu.arch.nmi_injected = false;
1153 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1173 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
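/*
 * svm_allocate_nested()/svm_free_nested(): the vmcb02 page and the nested
 * MSR permission bitmap are allocated lazily, when the L1 guest first
 * enables SVM, and freed together.
 */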
1179 struct page *vmcb02_page;
1184 vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1188 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1192 goto err_free_vmcb02;
1199 __free_page(vmcb02_page);
1245 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1248 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
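/*
 * nested_svm_exit_handled_msr()/nested_svm_intercept_ioio(): consult the
 * permission bitmaps supplied by L1 to decide whether an intercepted MSR
 * access or I/O instruction is L1's to handle.  Each MSR takes two bits
 * (read/write) in the MSR bitmap; an I/O port takes one bit, so an access
 * crossing a byte boundary needs a two-byte read.
 */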
1253 u32 offset, msr, value;
1259 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1261 write = svm->vmcb->control.exit_info_1 & 1;
1262 mask = 1 << ((2 * (msr & 0xf)) + write);
1278 unsigned port, size, iopm_len;
1286 port = svm->vmcb->control.exit_info_1 >> 16;
1287 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1288 SVM_IOIO_SIZE_SHIFT;
1290 start_bit = port % 8;
1291 iopm_len = (start_bit + size > 8) ? 2 : 1;
1292 mask = (0xf >> (4 - size)) << start_bit;
1303 u32 exit_code = svm->vmcb->control.exit_code;
1306 switch (exit_code) {
1313 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1318 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1323 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1332 case SVM_EXIT_ERR: {
1365 kvm_inject_gp(vcpu, 0);
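/*
 * nested_svm_inject_exception_vmexit(): synthesize an
 * SVM_EXIT_EXCP_BASE + vector exit for an exception that L1 intercepts.
 * For #PF, exit_info_2 carries the faulting address (the payload, or CR2);
 * for #DB, a set DR7.GD is cleared before handing the exit to L1.
 */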
1382 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
1384 struct vmcb *vmcb = svm->vmcb;
1386 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
1387 vmcb->control.exit_code_hi = 0;
1389 if (ex->has_error_code)
1390 vmcb->control.exit_info_1 = ex->error_code;
1396 if (ex->vector == PF_VECTOR) {
1397 if (ex->has_payload)
1398 vmcb->control.exit_info_2 = ex->payload;
1400 vmcb->control.exit_info_2 = vcpu->arch.cr2;
1401 } else if (ex->vector == DB_VECTOR) {
1405 if (vcpu->arch.dr7 & DR7_GD) {
1406 vcpu->arch.dr7 &= ~DR7_GD;
1410 WARN_ON(ex->has_payload);
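/*
 * svm_check_nested_events(): decide, in priority order, whether a pending
 * exception, SMI, NMI or interrupt should cause a nested vmexit to L1
 * instead of being delivered to L2.  Events stay blocked while a nested
 * VMRUN is still pending or an event must be re-injected.
 */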
1437 bool block_nested_events = block_nested_exceptions ||
1442 if (block_nested_events)
1450 if (vcpu->arch.exception_vmexit.pending) {
1451 if (block_nested_exceptions)
1457 if (vcpu->arch.exception.pending) {
1458 if (block_nested_exceptions)
1463 #ifdef CONFIG_KVM_SMM
1465 if (block_nested_events)
1475 if (block_nested_events)
1484 if (block_nested_events)
1488 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
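/*
 * nested_svm_exit_special(): identifies exits that L0 must handle itself
 * before considering L1's intercepts, e.g. exceptions KVM intercepts for
 * its own use (checked against vmcb01's intercept bits) and #PF exits used
 * for async page fault delivery.
 */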
1498 u32 exit_code = svm->vmcb->control.exit_code;
1499 struct kvm_vcpu *vcpu = &svm->vcpu;
1501 switch (exit_code) {
1506 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1507 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1509 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1512 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1513 svm->vcpu.arch.apf.host_apf_flags)
1518 case SVM_EXIT_VMMCALL:
1536 vcpu->arch.tsc_scaling_ratio =
1548 memset(dst, 0, sizeof(struct vmcb_control_area));
1550 for (i = 0; i < MAX_INTERCEPT; i++)
1556 dst->asid = from->asid;
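/*
 * svm_get_nested_state()/svm_set_nested_state(): the
 * KVM_{GET,SET}_NESTED_STATE ioctls.  The fixed-size header is followed by
 * a KVM_STATE_NESTED_SVM_VMCB_SIZE blob holding the vmcb12 control and save
 * areas; on restore the blob is validated with the same consistency checks
 * VMRUN uses before the nested state is adopted.
 */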
1579 struct kvm_nested_state __user *user_kvm_nested_state,
1583 struct vmcb_control_area *ctl;
1585 struct kvm_nested_state kvm_state = {
1587 .format = KVM_STATE_NESTED_FORMAT_SVM,
1588 .size = sizeof(kvm_state),
1590 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1591 &user_kvm_nested_state->data.svm[0];
1594 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1598 if (user_data_size < kvm_state.size)
1604 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1605 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1608 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1612 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1614 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1624 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1627 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1632 r = copy_to_user(&user_vmcb->control, ctl,
1633 sizeof(user_vmcb->control));
1638 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1639 sizeof(user_vmcb->save)))
1642 return kvm_state.size;
1646 struct kvm_nested_state __user *user_kvm_nested_state,
1647 struct kvm_nested_state *kvm_state)
1650 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1651 &user_kvm_nested_state->data.svm[0];
1652 struct vmcb_control_area *ctl;
1653 struct vmcb_save_area *save;
1659 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1660 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1662 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1665 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1666 KVM_STATE_NESTED_RUN_PENDING |
1667 KVM_STATE_NESTED_GIF_SET))
1674 if (!(vcpu->arch.efer & EFER_SVME)) {
1676 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1681 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1684 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1686 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1692 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1696 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1697 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1702 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1704 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1717 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1725 if (!(save->cr0 & X86_CR0_PG) ||
1726 !(save->cr0 & X86_CR0_PE) ||
1727 (save->rflags & X86_EFLAGS_VM) ||
1744 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1747 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1766 if (WARN_ON_ONCE(ret))
1771 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1787 if (!vcpu->arch.pdptrs_from_userspace &&
1798 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1799 vcpu->run->internal.suberror =
1800 KVM_INTERNAL_ERROR_EMULATION;
1801 vcpu->run->internal.ndata = 0;
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
static bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
static int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
static bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
static bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
static void kvm_hv_nested_transtion_tlb_flush(struct kvm_vcpu *vcpu, bool tdp_enabled)
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
static unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
static void enter_guest_mode(struct kvm_vcpu *vcpu)
static void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static void leave_guest_mode(struct kvm_vcpu *vcpu)
static bool is_guest_mode(struct kvm_vcpu *vcpu)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len)
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
static bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3)
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
void kvm_init_mmu(struct kvm_vcpu *vcpu)
static bool is_smm(struct kvm_vcpu *vcpu)
u64 default_tsc_scaling_ratio
unsigned long pending_events
struct vmcb_ctrl_area_cached ctl
struct vmcb_save_area_cached save
struct kvm_vmcb_info vmcb02
bool force_msr_bitmap_recalc
unsigned long soft_int_old_rip
unsigned long soft_int_next_rip
unsigned long soft_int_csbase
struct svm_nested_state nested
struct kvm_vmcb_info vmcb01
u32 intercepts[MAX_INTERCEPT]
void svm_hv_inject_synthetic_vmexit_post_tlb_flush(struct kvm_vcpu *vcpu)
static void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
static bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
struct kvm_x86_nested_ops svm_nested_ops
static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
static void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, struct vmcb_ctrl_area_cached *to, struct vmcb_control_area *from)
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector, u32 error_code)
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt, bool reload_pdptrs)
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
static bool is_evtinj_soft(u32 evtinj)
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst, struct vmcb_ctrl_area_cached *from)
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu, struct x86_exception *fault)
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, struct vmcb *vmcb12)
static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to, struct vmcb_save_area *from)
void svm_free_nested(struct vcpu_svm *svm)
int nested_svm_vmexit(struct vcpu_svm *svm)
int nested_svm_exit_handled(struct vcpu_svm *svm)
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
static int svm_set_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, struct kvm_nested_state *kvm_state)
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, struct vmcb *vmcb12, bool from_vmrun)
static int svm_get_nested_state(struct kvm_vcpu *vcpu, struct kvm_nested_state __user *user_kvm_nested_state, u32 user_data_size)
int svm_allocate_nested(struct vcpu_svm *svm)
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, unsigned long vmcb12_rip, unsigned long vmcb12_csbase)
int nested_svm_exit_special(struct vcpu_svm *svm)
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
static int nested_svm_intercept(struct vcpu_svm *svm)
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
void recalc_intercepts(struct vcpu_svm *svm)
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, struct vmcb_save_area_cached *save)
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control)
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, struct vmcb_save_area *save)
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
static bool is_evtinj_nmi(u32 evtinj)
static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
static bool nested_exit_on_init(struct vcpu_svm *svm)
void svm_copy_vmrun_state(struct vmcb_save_area *to_save, struct vmcb_save_area *from_save)
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu, struct vmcb_ctrl_area_cached *control)
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
void svm_leave_nested(struct kvm_vcpu *vcpu)
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
u32 svm_msrpm_offset(u32 msr)
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
void svm_set_gif(struct vcpu_svm *svm, bool value)
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
void svm_vcpu_free_msrpm(u32 *msrpm)
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
u32 * svm_vcpu_alloc_msrpm(void)
void svm_update_lbrv(struct kvm_vcpu *vcpu)
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
static bool nested_exit_on_intr(struct vcpu_svm *svm)
static bool svm_is_intercept(struct vcpu_svm *svm, int bit)
static void vmcb_mark_all_dirty(struct vmcb *vmcb)
static bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
static bool nested_exit_on_smi(struct vcpu_svm *svm)
static bool is_x2apic_msrpm_offset(u32 offset)
static int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
static __always_inline struct vcpu_svm * to_svm(struct kvm_vcpu *vcpu)
static void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
static bool nested_npt_enabled(struct vcpu_svm *svm)
static bool nested_vnmi_enabled(struct vcpu_svm *svm)
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
#define NESTED_EXIT_CONTINUE
#define DEBUGCTL_RESERVED_BITS
static bool nested_vgif_enabled(struct vcpu_svm *svm)
static bool gif_set(struct vcpu_svm *svm)
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
static bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
static void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
bool kvm_apicv_activated(struct kvm *kvm)
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu, struct kvm_queued_exception *ex)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
void kvm_update_dr7(struct kvm_vcpu *vcpu)
static bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
static bool kvm_dr6_valid(u64 data)
static bool kvm_exception_is_soft(unsigned int nr)
static bool mmu_is_nested(struct kvm_vcpu *vcpu)
static bool kvm_dr7_valid(u64 data)
static bool is_paging(struct kvm_vcpu *vcpu)
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
static bool is_pae_paging(struct kvm_vcpu *vcpu)
static bool kvm_pause_in_guest(struct kvm *kvm)
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)