KVM: smm.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_SMM_H
#define ASM_KVM_SMM_H

#include <linux/build_bug.h>

#ifdef CONFIG_KVM_SMM

/*
 * 32 bit KVM's emulated SMM layout. Based on Intel P6 layout
 * (https://www.sandpile.org/x86/smm.htm).
 */

struct kvm_smm_seg_state_32 {
        u32 flags;
        u32 limit;
        u32 base;
} __packed;

struct kvm_smram_state_32 {
        u32 reserved1[62];
        u32 smbase;
        u32 smm_revision;
        u16 io_inst_restart;
        u16 auto_hlt_restart;
        u32 io_restart_rdi;
        u32 io_restart_rcx;
        u32 io_restart_rsi;
        u32 io_restart_rip;
        u32 cr4;

        /* A20M#, CPL, shutdown and other reserved/undocumented fields */
        u16 reserved2;
        u8 int_shadow; /* KVM extension */
        u8 reserved3[17];

        struct kvm_smm_seg_state_32 ds;
        struct kvm_smm_seg_state_32 fs;
        struct kvm_smm_seg_state_32 gs;
        struct kvm_smm_seg_state_32 idtr; /* IDTR has only base and limit */
        struct kvm_smm_seg_state_32 tr;
        u32 reserved;
        struct kvm_smm_seg_state_32 gdtr; /* GDTR has only base and limit */
        struct kvm_smm_seg_state_32 ldtr;
        struct kvm_smm_seg_state_32 es;
        struct kvm_smm_seg_state_32 cs;
        struct kvm_smm_seg_state_32 ss;

        u32 es_sel;
        u32 cs_sel;
        u32 ss_sel;
        u32 ds_sel;
        u32 fs_sel;
        u32 gs_sel;
        u32 ldtr_sel;
        u32 tr_sel;

        u32 dr7;
        u32 dr6;
        u32 gprs[8]; /* GPRS in the "natural" X86 order (EAX/ECX/EDX/.../EDI) */
        u32 eip;
        u32 eflags;
        u32 cr3;
        u32 cr0;
} __packed;
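
/*
 * A minimal sketch of compile-time layout checks that could accompany the
 * 32 bit struct above (offsetof() is assumed to be available here): because
 * the struct is __packed, reserved1[62] places smbase at offset 0xf8 and the
 * whole image is exactly 512 bytes, i.e. the SMRAM range 0xfe00..0xffff
 * relative to SMBASE in the P6 layout.
 */
static_assert(offsetof(struct kvm_smram_state_32, smbase) == 0xf8);
static_assert(offsetof(struct kvm_smram_state_32, cr0) == 0x1fc);
static_assert(sizeof(struct kvm_smram_state_32) == 0x200);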

/* 64 bit KVM's emulated SMM layout. Based on AMD64 layout */

struct kvm_smm_seg_state_64 {
        u16 selector;
        u16 attributes;
        u32 limit;
        u64 base;
};

struct kvm_smram_state_64 {

        struct kvm_smm_seg_state_64 es;
        struct kvm_smm_seg_state_64 cs;
        struct kvm_smm_seg_state_64 ss;
        struct kvm_smm_seg_state_64 ds;
        struct kvm_smm_seg_state_64 fs;
        struct kvm_smm_seg_state_64 gs;
        struct kvm_smm_seg_state_64 gdtr; /* GDTR has only base and limit */
        struct kvm_smm_seg_state_64 ldtr;
        struct kvm_smm_seg_state_64 idtr; /* IDTR has only base and limit */
        struct kvm_smm_seg_state_64 tr;

        /* I/O restart and auto halt restart are not implemented by KVM */
        u64 io_restart_rip;
        u64 io_restart_rcx;
        u64 io_restart_rsi;
        u64 io_restart_rdi;
        u32 io_restart_dword;
        u32 reserved1;
        u8 io_inst_restart;
        u8 auto_hlt_restart;
        u8 amd_nmi_mask; /* Documented in AMD BKDG as NMI mask, not used by KVM */
        u8 int_shadow;
        u32 reserved2;

        u64 efer;

        /*
         * Two fields below are implemented on AMD only, to store
         * SVM guest vmcb address if the #SMI was received while in the guest mode.
         */
        u64 svm_guest_flag;
        u64 svm_guest_vmcb_gpa;
        u64 svm_guest_virtual_int; /* unknown purpose, not implemented */

        u32 reserved3[3];
        u32 smm_revison;
        u32 smbase;
        u32 reserved4[5];

        /* ssp and svm_* fields below are not implemented by KVM */
        u64 ssp;
        u64 svm_guest_pat;
        u64 svm_host_efer;
        u64 svm_host_cr4;
        u64 svm_host_cr3;
        u64 svm_host_cr0;

        u64 cr4;
        u64 cr3;
        u64 cr0;
        u64 dr7;
        u64 dr6;
        u64 rflags;
        u64 rip;
        u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/.../RCX/RAX) */
};
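
/*
 * A minimal sketch (not verbatim KVM code) of how the reversed gprs[] order
 * above can be filled, using KVM's kvm_register_read_raw() accessor and
 * assuming the VCPU_REGS_* enumeration runs RAX..R15 as 0..15: register i is
 * stored into slot 15 - i, so gprs[0] holds R15 and gprs[15] holds RAX.
 * The helper name is illustrative only.
 */
static inline void example_save_gprs_64(struct kvm_vcpu *vcpu,
                                        struct kvm_smram_state_64 *smram)
{
        int i;

        for (i = 0; i < 16; i++)
                smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);
}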

union kvm_smram {
        struct kvm_smram_state_64 smram64;
        struct kvm_smram_state_32 smram32;
        u8 bytes[512];
};
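
/*
 * Usage sketch: the union lets common code treat SMRAM as an opaque 512 byte
 * buffer while mode-aware code picks the matching view. The helper below is
 * illustrative only; the long_mode flag is assumed to be supplied by the
 * caller.
 */
static inline void example_init_smram(union kvm_smram *smram, bool long_mode,
                                      u32 smbase)
{
        memset(smram->bytes, 0, sizeof(smram->bytes));

        if (long_mode)
                smram->smram64.smbase = smbase;
        else
                smram->smram32.smbase = smbase;
}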

static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
{
        kvm_make_request(KVM_REQ_SMI, vcpu);
        return 0;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_SMM_MASK;
}

void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
void enter_smm(struct kvm_vcpu *vcpu);
int emulator_leave_smm(struct x86_emulate_ctxt *ctxt);
void process_smi(struct kvm_vcpu *vcpu);
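
/*
 * Call-flow sketch for the API above: a caller only needs to queue
 * KVM_REQ_SMI via kvm_inject_smi(); the request is later handled by
 * process_smi(), and enter_smm() builds the SMRAM image once the SMI is
 * actually taken. The helper below merely illustrates the calling
 * convention; rejecting a nested SMI here is an arbitrary example policy.
 */
static inline int example_request_smi(struct kvm_vcpu *vcpu)
{
        if (is_smm(vcpu))
                return -EBUSY; /* example policy: don't queue while in SMM */

        return kvm_inject_smi(vcpu);
}
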
#else
static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }

/*
 * emulator_leave_smm is used as a function pointer, so the
 * stub is defined in x86.c.
 */
#endif

#endif