#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

/* Local KVM headers providing the SMRAM layout and helpers used below. */
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "smm.h"
#include "cpuid.h"
#include "trace.h"
#define CHECK_SMRAM32_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)

#define CHECK_SMRAM64_OFFSET(field, offset) \
	ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
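
/*
 * Compile-time layout checks: the CHECK_SMRAM*_OFFSET() macros rebase the
 * architectural 0xFExx offsets against the start of the state-save
 * structures, and the union as a whole must be exactly 512 bytes.
 */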
static void check_smram_offsets(void)
{
	BUILD_BUG_ON(sizeof(union kvm_smram) != 512);
}
#undef CHECK_SMRAM64_OFFSET
#undef CHECK_SMRAM32_OFFSET
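
/*
 * Track SMM entry/exit in vcpu->arch.hflags.  Leaving SMM additionally
 * clears the "SMM inside NMI" flag, re-evaluates pending events and forces
 * the PDPTRs to be reloaded from guest memory; both directions reset the
 * MMU context.
 */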
void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);

	if (entering_smm) {
		vcpu->arch.hflags |= HF_SMM_MASK;
	} else {
		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		vcpu->arch.pdptrs_from_userspace = false;
	}

	kvm_mmu_reset_context(vcpu);
}
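
/* Pend an SMI and let the event-injection path pick it up. */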
void process_smi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.smi_pending = true;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
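
/*
 * Pack a segment's attribute bits into the flags-word format used by the
 * 32-bit SMRAM segment state.
 */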
static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
{
	u32 flags = 0;

	flags |= seg->g       << 23;
	flags |= seg->db      << 22;
	flags |= seg->l       << 21;
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;
	flags |= seg->type    << 8;

	return flags;
}
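
/* Save one segment register into its 32-bit SMRAM slot. */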
static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_32 *state,
				  u32 *selector, int n)
{
	struct kvm_segment seg;

	kvm_get_segment(vcpu, &seg, n);
	*selector = seg.selector;
	state->base = seg.base;
	state->limit = seg.limit;
	state->flags = enter_smm_get_segment_flags(&seg);
}
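
/* Save one segment register into its 64-bit SMRAM slot. */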
static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
				  struct kvm_smm_seg_state_64 *state, int n)
{
	struct kvm_segment seg;

	kvm_get_segment(vcpu, &seg, n);
	state->selector = seg.selector;
	state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
	state->limit = seg.limit;
	state->base = seg.base;
}
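
/* Fill in the 32-bit SMRAM state-save image from the current vCPU state. */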
static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_32 *smram)
{
	struct desc_ptr dt;
	unsigned long val;
	int i;

	for (i = 0; i < 8; i++)
		smram->gprs[i] = kvm_register_read_raw(vcpu, i);

	kvm_get_dr(vcpu, 6, &val);
	smram->dr6 = (u32)val;
	kvm_get_dr(vcpu, 7, &val);
	smram->dr7 = (u32)val;

	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.base = dt.address;
	smram->gdtr.limit = dt.size;

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.base = dt.address;
	smram->idtr.limit = dt.size;

	smram->smm_revision = 0x00020000;
	smram->smbase = vcpu->arch.smbase;

	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
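
/* Fill in the 64-bit SMRAM state-save image from the current vCPU state. */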
static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
				    struct kvm_smram_state_64 *smram)
{
	struct desc_ptr dt;
	int i;

	for (i = 0; i < 16; i++)
		smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);

	smram->smbase = vcpu->arch.smbase;
	smram->smm_revison = 0x00020064;

	smram->efer = vcpu->arch.efer;

	enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);

	static_call(kvm_x86_get_idt)(vcpu, &dt);
	smram->idtr.limit = dt.size;
	smram->idtr.base = dt.address;

	enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);

	static_call(kvm_x86_get_gdt)(vcpu, &dt);
	smram->gdtr.limit = dt.size;
	smram->gdtr.base = dt.address;

	enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
	enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
	enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);

	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
}
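
/*
 * Enter SMM: build the SMRAM state-save image, give vendor code a chance
 * to adjust vCPU state via the enter_smm hook, then load the architectural
 * register state for SMM entry: CS based at SMBASE, flat 4GiB segments,
 * paging and protection disabled.  A failure at any point leaves the vCPU
 * in an undefined state, so the VM is marked dead.
 */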
void enter_smm(struct kvm_vcpu *vcpu)
{
	struct kvm_segment cs, ds;
	struct desc_ptr dt;
	unsigned long cr0;
	union kvm_smram smram;

	check_smram_offsets();

	memset(smram.bytes, 0, sizeof(smram.bytes));

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		enter_smm_save_state_64(vcpu, &smram.smram64);
	else
		enter_smm_save_state_32(vcpu, &smram.smram32);

	if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
		goto error;

	kvm_smm_changed(vcpu, true);

	if (static_call(kvm_x86_get_nmi_mask)(vcpu))
		vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
	else
		static_call(kvm_x86_set_nmi_mask)(vcpu, true);

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);

	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
	static_call(kvm_x86_set_cr0)(vcpu, cr0);

	static_call(kvm_x86_set_cr4)(vcpu, 0);

	/* Undocumented: IDT limit is set to zero on entry to SMM. */
	dt.address = dt.size = 0;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
		goto error;

	cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
	cs.base = vcpu->arch.smbase;

	cs.limit    = ds.limit = 0xffffffff;
	cs.type     = ds.type = 0x3;
	cs.present  = ds.present = 1;
	cs.unusable = ds.unusable = 0;
	cs.padding  = ds.padding = 0;

	if (static_call(kvm_x86_set_efer)(vcpu, 0))
		goto error;

	return;
error:
	kvm_vm_dead(vcpu->kvm);
}
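
/* Unpack an SMRAM segment flags word back into a kvm_segment. */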
static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags)
{
	desc->g       = (flags >> 23) & 1;
	desc->db      = (flags >> 22) & 1;
	desc->l       = (flags >> 21) & 1;
	desc->avl     = (flags >> 20) & 1;
	desc->present = (flags >> 15) & 1;
	desc->dpl     = (flags >> 13) & 3;
	desc->s       = (flags >> 12) & 1;
	desc->type    = (flags >> 8) & 15;

	desc->unusable = !desc->present;
	desc->padding = 0;
}
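
/* Restore one segment register from its 32-bit SMRAM slot. */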
static int rsm_load_seg_32(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_32 *state,
			   u16 selector, int n)
{
	struct kvm_segment desc;

	desc.selector = selector;
	desc.base = state->base;
	desc.limit = state->limit;
	rsm_set_desc_flags(&desc, state->flags);
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}
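
/* Restore one segment register from its 64-bit SMRAM slot. */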
static int rsm_load_seg_64(struct kvm_vcpu *vcpu,
			   const struct kvm_smm_seg_state_64 *state, int n)
{
	struct kvm_segment desc;

	desc.selector = state->selector;
	rsm_set_desc_flags(&desc, state->attributes << 8);
	desc.limit = state->limit;
	desc.base = state->base;
	kvm_set_segment(vcpu, &desc, n);
	return X86EMUL_CONTINUE;
}
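
/* Restore CR0/CR3/CR4 when returning from SMM to protected (or long) mode. */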
static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
				    u64 cr0, u64 cr3, u64 cr4)
{
	u64 pcid = 0;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	/* CR4.PCIDE cannot be set while EFER.LMA = 0, so restore it last. */
	if (kvm_set_cr3(vcpu, cr3) ||
	    kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE) ||
	    kvm_set_cr0(vcpu, cr0))
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		if (kvm_set_cr4(vcpu, cr4))
			return X86EMUL_UNHANDLEABLE;
		if (pcid && kvm_set_cr3(vcpu, cr3 | pcid))
			return X86EMUL_UNHANDLEABLE;
	}

	return X86EMUL_CONTINUE;
}
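
/* Restore the vCPU from a 32-bit SMRAM state-save image (RSM). */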
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_32 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;

	ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
	ctxt->_eip = smstate->eip;

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = smstate->gprs[i];

	rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);

	dt.address = smstate->gdtr.base;
	dt.size = smstate->gdtr.limit;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);

	dt.address = smstate->idtr.base;
	dt.size = smstate->idtr.limit;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	vcpu->arch.smbase = smstate->smbase;

	r = rsm_enter_protected_mode(vcpu, smstate->cr0,
				     smstate->cr3, smstate->cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;

	return r;
}
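
/* Restore the vCPU from a 64-bit SMRAM state-save image (RSM). */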
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const struct kvm_smram_state_64 *smstate)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct desc_ptr dt;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = smstate->gprs[15 - i];

	ctxt->_eip = smstate->rip;
	ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;

	vcpu->arch.smbase = smstate->smbase;

	if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);

	dt.size = smstate->idtr.limit;
	dt.address = smstate->idtr.base;
	static_call(kvm_x86_set_idt)(vcpu, &dt);

	rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR);

	dt.size = smstate->gdtr.limit;
	dt.address = smstate->gdtr.base;
	static_call(kvm_x86_set_gdt)(vcpu, &dt);

	r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES);
	rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS);
	rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS);
	rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS);
	rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
	rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);

	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
	ctxt->interruptibility = (u8)smstate->int_shadow;

	return X86EMUL_CONTINUE;
}
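
/*
 * Emulate RSM: read the state-save area back from guest SMRAM, return to a
 * mode in which CR0/CR4/EFER can safely be rewritten, let vendor code run
 * its leave_smm hook, and finally reload the saved 32-bit or 64-bit state.
 */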
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	unsigned long cr0;
	union kvm_smram smram;
	u64 smbase;

	smbase = vcpu->arch.smbase;

	if (kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram)) < 0)
		return X86EMUL_UNHANDLEABLE;

	if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
		static_call(kvm_x86_set_nmi_mask)(vcpu, false);

	kvm_smm_changed(vcpu, false);

	/* Get back to real mode before reloading CR0/CR3/CR4/EFER. */
	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		struct kvm_segment cs_desc;
		unsigned long cr4;

		/* Zero CR4.PCIDE before CR0.PG. */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PCIDE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is needed to clear EFER.LMA. */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.s = cs_desc.g = cs_desc.present = 1;
		kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this also clears EFER.LMA. */
	cr0 = kvm_read_cr0(vcpu);
	if (cr0 & X86_CR0_PE)
		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		unsigned long cr4, efer;

		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = kvm_read_cr4(vcpu);
		if (cr4 & X86_CR4_PAE)
			kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);

		/* And finally drop back to 32-bit mode. */
		efer = 0;
		kvm_set_msr(vcpu, MSR_EFER, efer);
	}

	if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
		return X86EMUL_UNHANDLEABLE;

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return rsm_load_state_64(ctxt, &smram.smram64);
	else
		return rsm_load_state_32(ctxt, &smram.smram32);
}