KVM
smm.c File Reference
#include <linux/kvm_host.h>
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "smm.h"
#include "cpuid.h"
#include "trace.h"

Macros

#define pr_fmt(fmt)   KBUILD_MODNAME ": " fmt
 
#define CHECK_SMRAM32_OFFSET(field, offset)    ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)
 
#define CHECK_SMRAM64_OFFSET(field, offset)    ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
 

Functions

static void check_smram_offsets (void)
 
void kvm_smm_changed (struct kvm_vcpu *vcpu, bool entering_smm)
 
void process_smi (struct kvm_vcpu *vcpu)
 
static u32 enter_smm_get_segment_flags (struct kvm_segment *seg)
 
static void enter_smm_save_seg_32 (struct kvm_vcpu *vcpu, struct kvm_smm_seg_state_32 *state, u32 *selector, int n)
 
static void enter_smm_save_state_32 (struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram)
 
void enter_smm (struct kvm_vcpu *vcpu)
 
static void rsm_set_desc_flags (struct kvm_segment *desc, u32 flags)
 
static int rsm_load_seg_32 (struct kvm_vcpu *vcpu, const struct kvm_smm_seg_state_32 *state, u16 selector, int n)
 
static int rsm_enter_protected_mode (struct kvm_vcpu *vcpu, u64 cr0, u64 cr3, u64 cr4)
 
static int rsm_load_state_32 (struct x86_emulate_ctxt *ctxt, const struct kvm_smram_state_32 *smstate)
 
int emulator_leave_smm (struct x86_emulate_ctxt *ctxt)
 

Macro Definition Documentation

◆ CHECK_SMRAM32_OFFSET

#define CHECK_SMRAM32_OFFSET (   field,
  offset 
)     ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)

Definition at line 12 of file smm.c.
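
CHECK_SMRAM32_OFFSET() pins each field of struct kvm_smram_state_32 to its architectural offset in SMRAM, expressed relative to the 0xFE00 start of the state-save area. The standalone sketch below illustrates the same technique, assuming ASSERT_STRUCT_OFFSET() reduces to an offsetof()-based compile-time assertion; the demo_* names exist only for this example and are not KVM code.

/* Standalone sketch, not KVM code: a compile-time field-offset check in the
 * spirit of ASSERT_STRUCT_OFFSET, built on C11 _Static_assert + offsetof. */
#include <stddef.h>
#include <stdint.h>

struct demo_smram_state_32 {
	uint8_t  reserved1[0xF8];   /* architectural offset 0xFE00 */
	uint32_t smbase;            /* 0xFEF8 */
	uint32_t smm_revision;      /* 0xFEFC */
};

#define DEMO_CHECK_SMRAM32_OFFSET(field, offset) \
	_Static_assert(offsetof(struct demo_smram_state_32, field) == ((offset) - 0xFE00), \
		       "SMRAM32 field " #field " is at the wrong offset")

DEMO_CHECK_SMRAM32_OFFSET(reserved1,    0xFE00);
DEMO_CHECK_SMRAM32_OFFSET(smbase,       0xFEF8);
DEMO_CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);

int main(void) { return 0; }

If a field is moved or mis-sized, the build fails at compile time, which is exactly the guarantee check_smram_offsets() relies on below.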

◆ CHECK_SMRAM64_OFFSET

#define CHECK_SMRAM64_OFFSET (   field,
  offset 
)     ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)

Definition at line 15 of file smm.c.

◆ pr_fmt

#define pr_fmt (   fmt)    KBUILD_MODNAME ": " fmt

Definition at line 2 of file smm.c.
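
pr_fmt() is the usual kernel idiom for prefixing every pr_*() message emitted from this file with the module name (KBUILD_MODNAME). The userspace sketch below shows how the expansion composes; dbg_info() is a made-up stand-in for the kernel's pr_info(), and KBUILD_MODNAME is hard-coded to "kvm" here, whereas the kernel build derives it from the object name.

/* Userspace sketch of the pr_fmt convention; dbg_info() stands in for the
 * kernel's pr_info(), which wraps pr_fmt() around its format string. */
#include <stdio.h>

#define KBUILD_MODNAME "kvm"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define dbg_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	unsigned int smbase = 0x30000;

	dbg_info("entering SMM, smbase=0x%x\n", smbase);
	/* prints: kvm: entering SMM, smbase=0x30000 */
	return 0;
}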

Function Documentation

◆ check_smram_offsets()

static void check_smram_offsets ( void  )
static

Definition at line 18 of file smm.c.

19 {
20  /* 32 bit SMRAM image */
21  CHECK_SMRAM32_OFFSET(reserved1, 0xFE00);
22  CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
23  CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
24  CHECK_SMRAM32_OFFSET(io_inst_restart, 0xFF00);
25  CHECK_SMRAM32_OFFSET(auto_hlt_restart, 0xFF02);
26  CHECK_SMRAM32_OFFSET(io_restart_rdi, 0xFF04);
27  CHECK_SMRAM32_OFFSET(io_restart_rcx, 0xFF08);
28  CHECK_SMRAM32_OFFSET(io_restart_rsi, 0xFF0C);
29  CHECK_SMRAM32_OFFSET(io_restart_rip, 0xFF10);
30  CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
31  CHECK_SMRAM32_OFFSET(reserved2, 0xFF18);
32  CHECK_SMRAM32_OFFSET(int_shadow, 0xFF1A);
33  CHECK_SMRAM32_OFFSET(reserved3, 0xFF1B);
34  CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
35  CHECK_SMRAM32_OFFSET(fs, 0xFF38);
36  CHECK_SMRAM32_OFFSET(gs, 0xFF44);
37  CHECK_SMRAM32_OFFSET(idtr, 0xFF50);
38  CHECK_SMRAM32_OFFSET(tr, 0xFF5C);
39  CHECK_SMRAM32_OFFSET(gdtr, 0xFF6C);
40  CHECK_SMRAM32_OFFSET(ldtr, 0xFF78);
41  CHECK_SMRAM32_OFFSET(es, 0xFF84);
42  CHECK_SMRAM32_OFFSET(cs, 0xFF90);
43  CHECK_SMRAM32_OFFSET(ss, 0xFF9C);
44  CHECK_SMRAM32_OFFSET(es_sel, 0xFFA8);
45  CHECK_SMRAM32_OFFSET(cs_sel, 0xFFAC);
46  CHECK_SMRAM32_OFFSET(ss_sel, 0xFFB0);
47  CHECK_SMRAM32_OFFSET(ds_sel, 0xFFB4);
48  CHECK_SMRAM32_OFFSET(fs_sel, 0xFFB8);
49  CHECK_SMRAM32_OFFSET(gs_sel, 0xFFBC);
50  CHECK_SMRAM32_OFFSET(ldtr_sel, 0xFFC0);
51  CHECK_SMRAM32_OFFSET(tr_sel, 0xFFC4);
52  CHECK_SMRAM32_OFFSET(dr7, 0xFFC8);
53  CHECK_SMRAM32_OFFSET(dr6, 0xFFCC);
54  CHECK_SMRAM32_OFFSET(gprs, 0xFFD0);
55  CHECK_SMRAM32_OFFSET(eip, 0xFFF0);
56  CHECK_SMRAM32_OFFSET(eflags, 0xFFF4);
57  CHECK_SMRAM32_OFFSET(cr3, 0xFFF8);
58  CHECK_SMRAM32_OFFSET(cr0, 0xFFFC);
59 
60  /* 64 bit SMRAM image */
61  CHECK_SMRAM64_OFFSET(es, 0xFE00);
62  CHECK_SMRAM64_OFFSET(cs, 0xFE10);
63  CHECK_SMRAM64_OFFSET(ss, 0xFE20);
64  CHECK_SMRAM64_OFFSET(ds, 0xFE30);
65  CHECK_SMRAM64_OFFSET(fs, 0xFE40);
66  CHECK_SMRAM64_OFFSET(gs, 0xFE50);
67  CHECK_SMRAM64_OFFSET(gdtr, 0xFE60);
68  CHECK_SMRAM64_OFFSET(ldtr, 0xFE70);
69  CHECK_SMRAM64_OFFSET(idtr, 0xFE80);
70  CHECK_SMRAM64_OFFSET(tr, 0xFE90);
71  CHECK_SMRAM64_OFFSET(io_restart_rip, 0xFEA0);
72  CHECK_SMRAM64_OFFSET(io_restart_rcx, 0xFEA8);
73  CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
74  CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
75  CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
76  CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
77  CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
78  CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
79  CHECK_SMRAM64_OFFSET(amd_nmi_mask, 0xFECA);
80  CHECK_SMRAM64_OFFSET(int_shadow, 0xFECB);
81  CHECK_SMRAM64_OFFSET(reserved2, 0xFECC);
82  CHECK_SMRAM64_OFFSET(efer, 0xFED0);
83  CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8);
84  CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0);
85  CHECK_SMRAM64_OFFSET(svm_guest_virtual_int, 0xFEE8);
86  CHECK_SMRAM64_OFFSET(reserved3, 0xFEF0);
87  CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC);
88  CHECK_SMRAM64_OFFSET(smbase, 0xFF00);
89  CHECK_SMRAM64_OFFSET(reserved4, 0xFF04);
90  CHECK_SMRAM64_OFFSET(ssp, 0xFF18);
91  CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20);
92  CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28);
93  CHECK_SMRAM64_OFFSET(svm_host_cr4, 0xFF30);
94  CHECK_SMRAM64_OFFSET(svm_host_cr3, 0xFF38);
95  CHECK_SMRAM64_OFFSET(svm_host_cr0, 0xFF40);
96  CHECK_SMRAM64_OFFSET(cr4, 0xFF48);
97  CHECK_SMRAM64_OFFSET(cr3, 0xFF50);
98  CHECK_SMRAM64_OFFSET(cr0, 0xFF58);
99  CHECK_SMRAM64_OFFSET(dr7, 0xFF60);
100  CHECK_SMRAM64_OFFSET(dr6, 0xFF68);
101  CHECK_SMRAM64_OFFSET(rflags, 0xFF70);
102  CHECK_SMRAM64_OFFSET(rip, 0xFF78);
103  CHECK_SMRAM64_OFFSET(gprs, 0xFF80);
104 
105  BUILD_BUG_ON(sizeof(union kvm_smram) != 512);
106 }
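
All of the offsets asserted above fall in the range 0xFE00-0xFFFF: the state-save image is the top 512 bytes of the 64 KiB SMRAM window above SMBASE, which is why each check subtracts 0xFE00 and why the function ends by asserting that union kvm_smram is 512 bytes. A standalone sketch of that arithmetic (plain C, not KVM code):

/* Standalone arithmetic sketch: an architectural offset such as EIP at
 * 0xFFF0 lands 0x1F0 bytes into the 512-byte state-save image. */
#include <stdio.h>

int main(void)
{
	unsigned int save_area_base = 0xFE00;                /* relative to SMBASE */
	unsigned int save_area_size = 0x10000 - save_area_base;
	unsigned int eip_offset     = 0xFFF0 - save_area_base;

	printf("save area size: %u bytes\n", save_area_size); /* 512 */
	printf("eip offset in image: 0x%X\n", eip_offset);    /* 0x1F0 */
	return 0;
}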

◆ emulator_leave_smm()

int emulator_leave_smm ( struct x86_emulate_ctxt *  ctxt)

Definition at line 571 of file smm.c.

572 {
573  struct kvm_vcpu *vcpu = ctxt->vcpu;
574  unsigned long cr0;
575  union kvm_smram smram;
576  u64 smbase;
577  int ret;
578 
579  smbase = vcpu->arch.smbase;
580 
581  ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
582  if (ret < 0)
583  return X86EMUL_UNHANDLEABLE;
584 
585  if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
586  static_call(kvm_x86_set_nmi_mask)(vcpu, false);
587 
588  kvm_smm_changed(vcpu, false);
589 
590  /*
591  * Get back to real mode, to prepare a safe state in which to load
592  * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
593  * supports long mode.
594  */
595 #ifdef CONFIG_X86_64
596  if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
597  struct kvm_segment cs_desc;
598  unsigned long cr4;
599 
600  /* Zero CR4.PCIDE before CR0.PG. */
601  cr4 = kvm_read_cr4(vcpu);
602  if (cr4 & X86_CR4_PCIDE)
603  kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
604 
605  /* A 32-bit code segment is required to clear EFER.LMA. */
606  memset(&cs_desc, 0, sizeof(cs_desc));
607  cs_desc.type = 0xb;
608  cs_desc.s = cs_desc.g = cs_desc.present = 1;
609  kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
610  }
611 #endif
612 
613  /* For the 64-bit case, this will clear EFER.LMA. */
614  cr0 = kvm_read_cr0(vcpu);
615  if (cr0 & X86_CR0_PE)
616  kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
617 
618 #ifdef CONFIG_X86_64
619  if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
620  unsigned long cr4, efer;
621 
622  /* Clear CR4.PAE before clearing EFER.LME. */
623  cr4 = kvm_read_cr4(vcpu);
624  if (cr4 & X86_CR4_PAE)
625  kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
626 
627  /* And finally go back to 32-bit mode. */
628  efer = 0;
629  kvm_set_msr(vcpu, MSR_EFER, efer);
630  }
631 #endif
632 
633  /*
634  * Give leave_smm() a chance to make ISA-specific changes to the vCPU
635  * state (e.g. enter guest mode) before loading state from the SMM
636  * state-save area.
637  */
638  if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
639  return X86EMUL_UNHANDLEABLE;
640 
641 #ifdef CONFIG_X86_64
642  if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
643  return rsm_load_state_64(ctxt, &smram.smram64);
644  else
645 #endif
646  return rsm_load_state_32(ctxt, &smram.smram32);
647 }
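
The code above tears down protected/long mode in a fixed order before reloading the saved CR0/CR3/CR4/EFER, so that each step only clears bits whose prerequisites are still satisfied. The standalone sketch below restates that order on plain flag variables (not KVM code; the bit positions are the architectural CR0/CR4/EFER bits):

/* Standalone sketch of the unwind order emulator_leave_smm() follows before
 * reloading state from the SMRAM image. */
#include <stdio.h>

#define CR0_PE    (1u << 0)
#define CR0_PG    (1u << 31)
#define CR4_PAE   (1u << 5)
#define CR4_PCIDE (1u << 17)
#define EFER_LME  (1u << 8)

int main(void)
{
	unsigned int cr0 = CR0_PE | CR0_PG;
	unsigned int cr4 = CR4_PAE | CR4_PCIDE;
	unsigned int efer = EFER_LME;

	cr4 &= ~CR4_PCIDE;             /* 1. PCIDE must go before CR0.PG */
	cr0 &= ~(CR0_PG | CR0_PE);     /* 2. leave paging and protected mode */
	cr4 &= ~CR4_PAE;               /* 3. PAE only after paging is off */
	efer = 0;                      /* 4. finally clear EFER.LME */

	printf("cr0=%#x cr4=%#x efer=%#x\n", cr0, cr4, efer);
	return 0;
}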

◆ enter_smm()

void enter_smm ( struct kvm_vcpu *  vcpu)

Definition at line 281 of file smm.c.

282 {
283  struct kvm_segment cs, ds;
284  struct desc_ptr dt;
285  unsigned long cr0;
286  union kvm_smram smram;
287 
288  check_smram_offsets();
289 
290  memset(smram.bytes, 0, sizeof(smram.bytes));
291 
292 #ifdef CONFIG_X86_64
293  if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
294  enter_smm_save_state_64(vcpu, &smram.smram64);
295  else
296 #endif
297  enter_smm_save_state_32(vcpu, &smram.smram32);
298 
299  /*
300  * Give enter_smm() a chance to make ISA-specific changes to the vCPU
301  * state (e.g. leave guest mode) after we've saved the state into the
302  * SMM state-save area.
303  *
304  * Kill the VM in the unlikely case of failure, because the VM
305  * can be in undefined state in this case.
306  */
307  if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
308  goto error;
309 
310  kvm_smm_changed(vcpu, true);
311 
312  if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
313  goto error;
314 
315  if (static_call(kvm_x86_get_nmi_mask)(vcpu))
316  vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
317  else
318  static_call(kvm_x86_set_nmi_mask)(vcpu, true);
319 
320  kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
321  kvm_rip_write(vcpu, 0x8000);
322 
323  static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
324 
325  cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
326  static_call(kvm_x86_set_cr0)(vcpu, cr0);
327 
328  static_call(kvm_x86_set_cr4)(vcpu, 0);
329 
330  /* Undocumented: IDT limit is set to zero on entry to SMM. */
331  dt.address = dt.size = 0;
332  static_call(kvm_x86_set_idt)(vcpu, &dt);
333 
334  if (WARN_ON_ONCE(kvm_set_dr(vcpu, 7, DR7_FIXED_1)))
335  goto error;
336 
337  cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
338  cs.base = vcpu->arch.smbase;
339 
340  ds.selector = 0;
341  ds.base = 0;
342 
343  cs.limit = ds.limit = 0xffffffff;
344  cs.type = ds.type = 0x3;
345  cs.dpl = ds.dpl = 0;
346  cs.db = ds.db = 0;
347  cs.s = ds.s = 1;
348  cs.l = ds.l = 0;
349  cs.g = ds.g = 1;
350  cs.avl = ds.avl = 0;
351  cs.present = ds.present = 1;
352  cs.unusable = ds.unusable = 0;
353  cs.padding = ds.padding = 0;
354 
355  kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
356  kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
357  kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
358  kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
359  kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
360  kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
361 
362 #ifdef CONFIG_X86_64
363  if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
364  if (static_call(kvm_x86_set_efer)(vcpu, 0))
365  goto error;
366 #endif
367 
368  kvm_update_cpuid_runtime(vcpu);
369  kvm_mmu_reset_context(vcpu);
370  return;
371 error:
372  kvm_vm_dead(vcpu->kvm);
373 }
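
The register state set up here matches the architectural SMM entry state: CS.base is loaded with SMBASE and RIP with 0x8000, so the handler starts executing at SMBASE + 0x8000, while the image written by kvm_vcpu_write_guest() sits at SMBASE + 0xFE00. A standalone sketch of those addresses for the power-on default SMBASE of 0x30000 (plain C, not KVM code):

/* Standalone sketch: where the SMI handler and the state-save image land
 * for the architectural default SMBASE of 0x30000. */
#include <stdio.h>

int main(void)
{
	unsigned long smbase = 0x30000;              /* power-on default */

	printf("SMI entry point: 0x%lx\n", smbase + 0x8000);  /* 0x38000 */
	printf("state-save area: 0x%lx-0x%lx\n",
	       smbase + 0xfe00, smbase + 0xffff);             /* 0x3fe00-0x3ffff */
	return 0;
}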

◆ enter_smm_get_segment_flags()

static u32 enter_smm_get_segment_flags ( struct kvm_segment *  seg)
static

Definition at line 141 of file smm.c.

142 {
143  u32 flags = 0;
144  flags |= seg->g << 23;
145  flags |= seg->db << 22;
146  flags |= seg->l << 21;
147  flags |= seg->avl << 20;
148  flags |= seg->present << 15;
149  flags |= seg->dpl << 13;
150  flags |= seg->s << 12;
151  flags |= seg->type << 8;
152  return flags;
153 }

◆ enter_smm_save_seg_32()

static void enter_smm_save_seg_32 ( struct kvm_vcpu *  vcpu,
struct kvm_smm_seg_state_32 *  state,
u32 *  selector,
int  n 
)
static

Definition at line 155 of file smm.c.

158 {
159  struct kvm_segment seg;
160 
161  kvm_get_segment(vcpu, &seg, n);
162  *selector = seg.selector;
163  state->base = seg.base;
164  state->limit = seg.limit;
165  state->flags = enter_smm_get_segment_flags(&seg);
166 }

◆ enter_smm_save_state_32()

static void enter_smm_save_state_32 ( struct kvm_vcpu *  vcpu,
struct kvm_smram_state_32 *  smram 
)
static

Definition at line 183 of file smm.c.

185 {
186  struct desc_ptr dt;
187  unsigned long val;
188  int i;
189 
190  smram->cr0 = kvm_read_cr0(vcpu);
191  smram->cr3 = kvm_read_cr3(vcpu);
192  smram->eflags = kvm_get_rflags(vcpu);
193  smram->eip = kvm_rip_read(vcpu);
194 
195  for (i = 0; i < 8; i++)
196  smram->gprs[i] = kvm_register_read_raw(vcpu, i);
197 
198  kvm_get_dr(vcpu, 6, &val);
199  smram->dr6 = (u32)val;
200  kvm_get_dr(vcpu, 7, &val);
201  smram->dr7 = (u32)val;
202 
203  enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
204  enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
205 
206  static_call(kvm_x86_get_gdt)(vcpu, &dt);
207  smram->gdtr.base = dt.address;
208  smram->gdtr.limit = dt.size;
209 
210  static_call(kvm_x86_get_idt)(vcpu, &dt);
211  smram->idtr.base = dt.address;
212  smram->idtr.limit = dt.size;
213 
214  enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
215  enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
216  enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);
217 
218  enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
219  enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
220  enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
221 
222  smram->cr4 = kvm_read_cr4(vcpu);
223  smram->smm_revision = 0x00020000;
224  smram->smbase = vcpu->arch.smbase;
225 
226  smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
227 }

◆ kvm_smm_changed()

void kvm_smm_changed ( struct kvm_vcpu *  vcpu,
bool  entering_smm 
)

Definition at line 112 of file smm.c.

113 {
114  trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
115 
116  if (entering_smm) {
117  vcpu->arch.hflags |= HF_SMM_MASK;
118  } else {
119  vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
120 
121  /* Process a latched INIT or SMI, if any. */
122  kvm_make_request(KVM_REQ_EVENT, vcpu);
123 
124  /*
125  * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
126  * on SMM exit we still need to reload them from
127  * guest memory
128  */
129  vcpu->arch.pdptrs_from_userspace = false;
130  }
131 
132  kvm_mmu_reset_context(vcpu);
133 }

◆ process_smi()

void process_smi ( struct kvm_vcpu *  vcpu)

Definition at line 135 of file smm.c.

136 {
137  vcpu->arch.smi_pending = true;
138  kvm_make_request(KVM_REQ_EVENT, vcpu);
139 }

◆ rsm_enter_protected_mode()

static int rsm_enter_protected_mode ( struct kvm_vcpu *  vcpu,
u64  cr0,
u64  cr3,
u64  cr4 
)
static

Definition at line 421 of file smm.c.

423 {
424  int bad;
425  u64 pcid;
426 
427  /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
428  pcid = 0;
429  if (cr4 & X86_CR4_PCIDE) {
430  pcid = cr3 & 0xfff;
431  cr3 &= ~0xfff;
432  }
433 
434  bad = kvm_set_cr3(vcpu, cr3);
435  if (bad)
436  return X86EMUL_UNHANDLEABLE;
437 
438  /*
439  * First enable PAE, long mode needs it before CR0.PG = 1 is set.
440  * Then enable protected mode. However, PCID cannot be enabled
441  * if EFER.LMA=0, so set it separately.
442  */
443  bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
444  if (bad)
445  return X86EMUL_UNHANDLEABLE;
446 
447  bad = kvm_set_cr0(vcpu, cr0);
448  if (bad)
449  return X86EMUL_UNHANDLEABLE;
450 
451  if (cr4 & X86_CR4_PCIDE) {
452  bad = kvm_set_cr4(vcpu, cr4);
453  if (bad)
454  return X86EMUL_UNHANDLEABLE;
455  if (pcid) {
456  bad = kvm_set_cr3(vcpu, cr3 | pcid);
457  if (bad)
458  return X86EMUL_UNHANDLEABLE;
459  }
460 
461  }
462 
463  return X86EMUL_CONTINUE;
464 }
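
The CR3/CR4 ordering above exists because, once CR4.PCIDE is set, CR3 bits 11:0 hold the PCID, and PCIDE may only be enabled while those bits are zero (and, per the comment in the code, only with EFER.LMA set). The function therefore loads CR3 with the PCID field masked, enables PCIDE, and only then restores the full CR3 value. A standalone sketch of the split, using a made-up CR3 value (plain C, not KVM code):

/* Standalone sketch of the CR3/PCID split performed before CR4.PCIDE can be
 * set: load CR3 without the PCID field first, restore the PCID afterwards. */
#include <stdio.h>

int main(void)
{
	unsigned long long cr3  = 0x12345007ULL;     /* hypothetical saved CR3 */
	unsigned long long pcid = cr3 & 0xfff;       /* PCID lives in CR3[11:0] */
	unsigned long long base = cr3 & ~0xfffULL;   /* page-table base, PCID = 0 */

	printf("loaded before PCIDE: CR3=0x%llx\n", base);        /* 0x12345000 */
	printf("loaded after PCIDE : CR3=0x%llx\n", base | pcid); /* 0x12345007 */
	return 0;
}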

◆ rsm_load_seg_32()

static int rsm_load_seg_32 ( struct kvm_vcpu *  vcpu,
const struct kvm_smm_seg_state_32 *  state,
u16  selector,
int  n 
)
static

Definition at line 390 of file smm.c.

393 {
394  struct kvm_segment desc;
395 
396  desc.selector = selector;
397  desc.base = state->base;
398  desc.limit = state->limit;
399  rsm_set_desc_flags(&desc, state->flags);
400  kvm_set_segment(vcpu, &desc, n);
401  return X86EMUL_CONTINUE;
402 }

◆ rsm_load_state_32()

static int rsm_load_state_32 ( struct x86_emulate_ctxt *  ctxt,
const struct kvm_smram_state_32 *  smstate 
)
static

Definition at line 466 of file smm.c.

468 {
469  struct kvm_vcpu *vcpu = ctxt->vcpu;
470  struct desc_ptr dt;
471  int i, r;
472 
473  ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
474  ctxt->_eip = smstate->eip;
475 
476  for (i = 0; i < 8; i++)
477  *reg_write(ctxt, i) = smstate->gprs[i];
478 
479  if (kvm_set_dr(vcpu, 6, smstate->dr6))
480  return X86EMUL_UNHANDLEABLE;
481  if (kvm_set_dr(vcpu, 7, smstate->dr7))
482  return X86EMUL_UNHANDLEABLE;
483 
484  rsm_load_seg_32(vcpu, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
485  rsm_load_seg_32(vcpu, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);
486 
487  dt.address = smstate->gdtr.base;
488  dt.size = smstate->gdtr.limit;
489  static_call(kvm_x86_set_gdt)(vcpu, &dt);
490 
491  dt.address = smstate->idtr.base;
492  dt.size = smstate->idtr.limit;
493  static_call(kvm_x86_set_idt)(vcpu, &dt);
494 
495  rsm_load_seg_32(vcpu, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
496  rsm_load_seg_32(vcpu, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
497  rsm_load_seg_32(vcpu, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);
498 
499  rsm_load_seg_32(vcpu, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
500  rsm_load_seg_32(vcpu, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
501  rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
502 
503  vcpu->arch.smbase = smstate->smbase;
504 
505  r = rsm_enter_protected_mode(vcpu, smstate->cr0,
506  smstate->cr3, smstate->cr4);
507 
508  if (r != X86EMUL_CONTINUE)
509  return r;
510 
511  static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
512  ctxt->interruptibility = (u8)smstate->int_shadow;
513 
514  return r;
515 }

◆ rsm_set_desc_flags()

static void rsm_set_desc_flags ( struct kvm_segment *  desc,
u32  flags 
)
static

Definition at line 375 of file smm.c.

376 {
377  desc->g = (flags >> 23) & 1;
378  desc->db = (flags >> 22) & 1;
379  desc->l = (flags >> 21) & 1;
380  desc->avl = (flags >> 20) & 1;
381  desc->present = (flags >> 15) & 1;
382  desc->dpl = (flags >> 13) & 3;
383  desc->s = (flags >> 12) & 1;
384  desc->type = (flags >> 8) & 15;
385 
386  desc->unusable = !desc->present;
387  desc->padding = 0;
388 }
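
rsm_set_desc_flags() is the inverse of enter_smm_get_segment_flags(): both use the same packing of the descriptor attribute bits (type at bit 8, S at 12, DPL at 13, P at 15, AVL at 20, L at 21, D/B at 22, G at 23). The standalone round-trip sketch below checks that property with a mock segment type; struct mock_seg and the pack()/unpack() helpers are stand-ins for this example, not the kernel's struct kvm_segment.

/* Standalone round-trip sketch showing the two helpers share one bit layout. */
#include <assert.h>
#include <stdint.h>

struct mock_seg {
	uint8_t type, s, dpl, present, avl, l, db, g;
};

static uint32_t pack(const struct mock_seg *seg)      /* mirrors enter_smm_get_segment_flags() */
{
	return (seg->g << 23) | (seg->db << 22) | (seg->l << 21) |
	       (seg->avl << 20) | (seg->present << 15) | (seg->dpl << 13) |
	       (seg->s << 12) | (seg->type << 8);
}

static void unpack(struct mock_seg *seg, uint32_t flags) /* mirrors rsm_set_desc_flags() */
{
	seg->g = (flags >> 23) & 1;
	seg->db = (flags >> 22) & 1;
	seg->l = (flags >> 21) & 1;
	seg->avl = (flags >> 20) & 1;
	seg->present = (flags >> 15) & 1;
	seg->dpl = (flags >> 13) & 3;
	seg->s = (flags >> 12) & 1;
	seg->type = (flags >> 8) & 15;
}

int main(void)
{
	struct mock_seg cs = { .type = 0xb, .s = 1, .dpl = 0, .present = 1,
			       .avl = 0, .l = 0, .db = 1, .g = 1 };
	struct mock_seg out;

	unpack(&out, pack(&cs));
	assert(out.type == cs.type && out.s == cs.s && out.dpl == cs.dpl &&
	       out.present == cs.present && out.db == cs.db && out.g == cs.g);
	return 0;
}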