/*
 * NOTE(review): extraction fragment — this appears to be the interior of KVM's
 * nested SVM #VMEXIT handler (saving L2 state from vmcb02 into the guest's
 * vmcb12, then restoring L1 state in vmcb01).  The enclosing function's
 * signature is outside this view, many intermediate lines are elided (the
 * fused numeric prefixes jump), and several statements are split
 * mid-expression by the extraction.  Code below is left byte-identical;
 * comments describe only what the visible lines literally do.  TODO: recover
 * the unmangled source before making any code change here.
 */
/* Convenience alias: the kvm_vcpu embedded in the vcpu_svm container. */
969 struct kvm_vcpu *vcpu = &svm->
vcpu;
/* Host-side mapping, presumably used to map vmcb12 guest memory — the
 * kvm_vcpu_map()/kvm_vcpu_unmap() calls are elided from this view. */
973 struct kvm_host_map map;
/* Error path (condition elided): inject #GP(0) into the vCPU. */
979 kvm_inject_gp(vcpu, 0);
/* A deferred request to (re)load nested state pages is now moot. */
990 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
/* Leaving L2: the vCPU is runnable again in L1 context. */
993 svm->
vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/*
 * Save the exiting L2 guest's architectural state from the active vmcb02
 * back into vmcb12 (the L1 hypervisor's view).  Segment registers and
 * descriptor tables come straight from vmcb02; EFER/CR4/DR6 come from the
 * vcpu.arch cached copies instead — presumably because those are the
 * authoritative shadowed values (TODO confirm against full source).
 */
997 vmcb12->save.es = vmcb02->save.es;
998 vmcb12->save.cs = vmcb02->save.cs;
999 vmcb12->save.ss = vmcb02->save.ss;
1000 vmcb12->save.ds = vmcb02->save.ds;
1001 vmcb12->save.gdtr = vmcb02->save.gdtr;
1002 vmcb12->save.idtr = vmcb02->save.idtr;
1003 vmcb12->save.efer = svm->
vcpu.arch.efer;
1006 vmcb12->save.cr2 = vmcb02->save.cr2;
1007 vmcb12->save.cr4 = svm->
vcpu.arch.cr4;
/* RAX is read via the register-cache accessor, not vmcb02 directly. */
1011 vmcb12->save.rax = kvm_rax_read(vcpu);
1012 vmcb12->save.dr7 = vmcb02->save.dr7;
1013 vmcb12->save.dr6 = svm->
vcpu.arch.dr6;
1014 vmcb12->save.cpl = vmcb02->save.cpl;
/* Propagate exit/interrupt information so L1 can see why L2 exited. */
1016 vmcb12->control.int_state = vmcb02->control.int_state;
1017 vmcb12->control.exit_code = vmcb02->control.exit_code;
1018 vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1019 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1020 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
/* For valid exits (not SVM_EXIT_ERR) pass along additional exit data;
 * the lines between the condition and next_rip are elided here. */
1022 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
1026 vmcb12->control.next_rip = vmcb02->control.next_rip;
/* Carry the (possibly ticked-down) pause filter count back into vmcb01;
 * the guarding condition is elided from this view. */
1033 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
/* Something above changed injectable event state — have KVM re-evaluate
 * pending events before the next entry. */
1062 kvm_make_request(KVM_REQ_EVENT, &svm->
vcpu);
/* LBR virtualization branch: the taken-branch body above is elided; this
 * alternative handles vmcb01 having LBR_CTL_ENABLE_MASK set. */
1068 }
else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
/*
 * vNMI handoff: transfer NMI-blocked state from vmcb02 to vmcb01, and
 * convert one pending software-tracked NMI (vcpu->arch.nmi_pending) into
 * the hardware V_NMI_PENDING bit; otherwise make sure it is clear.
 */
1074 if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1075 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1077 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1079 if (vcpu->arch.nmi_pending) {
1080 vcpu->arch.nmi_pending--;
1081 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1083 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
/* Stale L2 event-injection info must not leak into L1's vmcb. */
1092 vmcb01->control.exit_int_info = 0;
/*
 * Back to L1's timebase: restore the L1 TSC offset, and write it into
 * vmcb01 only if it actually changed (the vmcb_mark_dirty call that
 * presumably follows is elided).
 */
1094 svm->
vcpu.arch.tsc_offset = svm->
vcpu.arch.l1_tsc_offset;
1095 if (vmcb01->control.tsc_offset != svm->
vcpu.arch.tsc_offset) {
1096 vmcb01->control.tsc_offset = svm->
vcpu.arch.tsc_offset;
/* Same for the TSC scaling ratio (first half of this condition elided). */
1101 vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1102 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
/* Restore L1's RAX from vmcb01 (other GPR/RIP/RSP restores elided). */
1115 kvm_rax_write(vcpu, vmcb01->save.rax);
/* Reset DR7 to its architecturally fixed-1 bits on return to L1. */
1119 svm->
vcpu.arch.dr7 = DR7_FIXED_1;
/* Tracepoint recording the exit being reflected to L1 (trailing
 * arguments of the call are elided from this view). */
1122 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1123 vmcb12->control.exit_info_1,
1124 vmcb12->control.exit_info_2,
1125 vmcb12->control.exit_int_info,
1126 vmcb12->control.exit_int_info_err,
/* Drop any NMI that was mid-injection into L2; it must not be replayed
 * into L1 (related exception/interrupt clearing is elided). */
1143 svm->
vcpu.arch.nmi_injected =
false;
/* If L1 had the trap flag set, a single-step event presumably needs to be
 * raised — the body of this condition is elided from this view. */
1153 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
static void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static void leave_guest_mode(struct kvm_vcpu *vcpu)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, struct vmcb *vmcb12)
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
void svm_update_lbrv(struct kvm_vcpu *vcpu)
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
static bool nested_exit_on_intr(struct vcpu_svm *svm)
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
bool kvm_apicv_activated(struct kvm *kvm)
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
static bool kvm_pause_in_guest(struct kvm *kvm)
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)