/* KVM x86: abridged excerpt of enter_smm() (arch/x86/kvm). */
struct kvm_segment cs, ds;
struct desc_ptr dt;
unsigned long cr0;
union kvm_smram smram;

memset(smram.bytes, 0, sizeof(smram.bytes));

/* 64-bit path shown; 32-bit guests go through enter_smm_save_state_32(). */
enter_smm_save_state_64(vcpu, &smram.smram64);
/* Let vendor code adjust vCPU state (e.g. leave guest mode); the saved
   state is then copied into guest SMRAM with kvm_vcpu_write_guest(). */
if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
        goto error;
/* Entering SMM masks NMIs; remember if an NMI handler was already running. */
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
        vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
else
        static_call(kvm_x86_set_nmi_mask)(vcpu, true);
/* ... RFLAGS and RIP are reset for the SMI handler ... */
static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
/* SMM starts in a real-mode-like environment: PE, EM, TS and PG are cleared. */
cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
static_call(kvm_x86_set_cr0)(vcpu, cr0);

static_call(kvm_x86_set_cr4)(vcpu, 0);

/* The IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
static_call(kvm_x86_set_idt)(vcpu, &dt);
if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
        goto error;
/* CS maps the SMRAM segment at SMBASE; DS doubles as a flat data segment. */
cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
cs.base = vcpu->arch.smbase;
ds.selector = 0;
ds.base = 0;
cs.limit = ds.limit = 0xffffffff;
cs.type = ds.type = 0x3;
/* ... dpl/db/s/l/g/avl attribute bits elided ... */
cs.present = ds.present = 1;
cs.unusable = ds.unusable = 0;
cs.padding = ds.padding = 0;
/* ... CS/DS/ES/FS/GS/SS are then loaded with kvm_set_segment() ... */
/* For 64-bit capable guests, EFER is cleared as well. */
if (static_call(kvm_x86_set_efer)(vcpu, 0))
        goto error;
kvm_update_cpuid_runtime(vcpu);
kvm_mmu_reset_context(vcpu);
return;
error:
/* Any failure above leaves the vCPU in an undefined state: kill the VM. */
kvm_vm_dead(vcpu->kvm);
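A note on the CS values programmed above: the hidden base is loaded with SMBASE itself and the visible selector with SMBASE >> 4, mirroring real-mode segmentation, while the SMI handler begins executing at offset 0x8000 within that segment (an architectural constant, not something visible in this excerpt). The sketch below is a standalone illustration of that arithmetic, not KVM code; 0x30000 is the architectural default SMBASE.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch only (not KVM code): shows the relationship between
 * SMBASE, the CS selector/base programmed above, and the SMI handler entry
 * point at SMBASE + 0x8000.
 */
int main(void)
{
        uint64_t smbase = 0x30000;      /* architectural default SMBASE */
        uint16_t cs_selector = (smbase >> 4) & 0xffff;
        uint64_t cs_base = smbase;      /* loaded directly, not selector << 4 */
        uint64_t entry = cs_base + 0x8000;

        printf("CS selector %#x, CS base %#llx, SMI entry %#llx\n",
               cs_selector, (unsigned long long)cs_base,
               (unsigned long long)entry);
        return 0;
}

Loading the base field directly, rather than deriving it from the selector, is what keeps this correct for relocated SMBASE values above 1 MiB, which a 16-bit real-mode selector could not encode.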
/* Signatures of helpers used by enter_smm(), for reference: */
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data, unsigned long len)
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram)
static void check_smram_offsets(void)
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
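The check_smram_offsets() signature above hints at how the kvm_smram_state_* layouts are kept honest: field offsets are verified at compile time. The sketch below illustrates only the general technique, with an invented struct and made-up offsets rather than KVM's real layout or macros; the kernel achieves the same effect with BUILD_BUG_ON()-style assertions.

#include <stddef.h>
#include <stdint.h>

/* Invented state-save fragment; the field offsets are made up for the demo. */
struct demo_smram_state {
        uint32_t cr0;           /* expected at offset 0x0 */
        uint32_t cr3;           /* expected at offset 0x4 */
        uint64_t rip;           /* expected at offset 0x8 */
};

/*
 * Compile-time layout checks: if a field ever moves, the build fails instead
 * of a guest silently resuming from a corrupted state-save area.
 */
_Static_assert(offsetof(struct demo_smram_state, cr0) == 0x0, "cr0 moved");
_Static_assert(offsetof(struct demo_smram_state, cr3) == 0x4, "cr3 moved");
_Static_assert(offsetof(struct demo_smram_state, rip) == 0x8, "rip moved");

int main(void)
{
        return 0;
}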