#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
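/*
 * ENCLS's memory operands use a fixed segment (DS) and a fixed
 * address size based on the mode.  Related prefixes are ignored.
 */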
static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
			     int size, int alignment, gva_t *gva)
	if (!IS_ALIGNED(*gva, alignment)) {
		fault = (s.unusable) ||
			(s.type != 2 && s.type != 3) ||
			(*gva > s.limit) ||
			((s.base != 0 || s.limit != 0xffffffff) &&
			(((u64)*gva + size - 1) > s.limit + 1));
	if (fault)
		kvm_inject_gp(vcpu, 0);
	return fault ? -EINVAL : 0;
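	/* Report the faulting address and access size to userspace. */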
	uint64_t data[2] = { addr, size };
static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data,
			unsigned int size)
{
	if (__copy_from_user(data, (void __user *)hva, size)) {
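	/* A failed GVA->GPA walk leaves INVALID_GPA; inject the resulting #PF. */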
	if (*gpa == INVALID_GPA) {
static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)
	if (kvm_is_error_hva(*hva)) {
	*hva |= gpa & ~PAGE_MASK;
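	/*
	 * A non-EPCM #PF indicates a bad userspace HVA.  This *should* check
	 * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC,
	 * but the error code isn't (yet) plumbed through the ENCLS helpers.
	 */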
	if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) {
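	/*
	 * If the guest thinks it's running on SGX2 hardware, inject an SGX
	 * #PF if the fault matches an EPCM fault signature (#GP on SGX1,
	 * #PF on SGX2).  The assumption is that EPCM faults are much more
	 * likely than a bad userspace address.
	 */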
	if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) {
		memset(&ex, 0, sizeof(ex));
		ex.vector = PF_VECTOR;
		ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
				PFERR_SGX_MASK;
		kvm_inject_gp(vcpu, 0);
static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
				  struct sgx_pageinfo *pageinfo,
				  unsigned long secs_hva,
				  gva_t secs_gva)
	struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents;
	struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1;
	u64 attributes, xfrm, size;

	if (!sgx_12_0 || !sgx_12_1) {
	miscselect = contents->miscselect;
	attributes = contents->attributes;
	xfrm = contents->xfrm;
	size = contents->size;
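	/* Enforce restriction of access to the PROVISIONKEY. */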
	if (!vcpu->kvm->arch.sgx_provisioning_allowed &&
	    (attributes & SGX_ATTR_PROVISIONKEY)) {
		if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY)
			pr_warn_once("SGX PROVISIONKEY advertised but not allowed\n");
		kvm_inject_gp(vcpu, 0);
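	/*
	 * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM.  Note
	 * that the allowed XFRM (XFeature Request Mask) isn't strictly bound
	 * by the supported XCR0.  FP+SSE *must* be set in XFRM, even if XSAVE
	 * is unsupported, i.e. guest_cpuid_has(XSAVE)=0 and XCR0=1.
	 */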
	if ((u32)miscselect & ~sgx_12_0->ebx ||
	    (u32)attributes & ~sgx_12_1->eax ||
	    (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
	    (u32)xfrm & ~sgx_12_1->ecx ||
	    (u32)(xfrm >> 32) & ~sgx_12_1->edx ||
	    xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) ||
	    (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		kvm_inject_gp(vcpu, 0);
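	/* Enforce CPUID restriction on max enclave size. */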
	max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 :
							    sgx_12_0->edx;
	if (size >= BIT_ULL(max_size_log2)) {
		kvm_inject_gp(vcpu, 0);
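	/*
	 * sgx_virt_ecreate() returns 0 on success, -EFAULT if ECREATE itself
	 * faulted (with trapnr set to the exception vector), and -EINVAL if
	 * access_ok() failed on the SECS HVA, which should never happen as
	 * KVM checks host addresses at memslot creation.
	 */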
	ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr);
	gva_t pageinfo_gva, secs_gva;
	gva_t metadata_gva, contents_gva;
	gpa_t metadata_gpa, contents_gpa, secs_gpa;
	unsigned long metadata_hva, contents_hva, secs_hva;
	struct sgx_pageinfo pageinfo;
	struct sgx_secs *contents;
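	/*
	 * Copy the PAGEINFO to local memory; its pointers need to be
	 * translated, i.e. a deep copy/translate is required.
	 */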
	r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo,
				sizeof(pageinfo), &ex);
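	/*
	 * Copy contents into kernel memory to prevent TOCTOU attack.  E.g. the
	 * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and
	 * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
	 * enforce restriction of access to the PROVISIONKEY.
	 */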
	contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
	if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) {
		free_page((unsigned long)contents);
	pageinfo.metadata = metadata_hva;
	pageinfo.contents = (u64)contents;
	free_page((unsigned long)contents);
	unsigned long sig_hva, secs_hva, token_hva, rflags;
	gva_t sig_gva, secs_gva, token_gva;
	gpa_t sig_gpa, secs_gpa, token_gpa;
	ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva,
			     (void __user *)secs_hva,
			     vmx->msr_ia32_sgxlepubkeyhash, &trapnr);
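	/* Mirror hardware: ENCLS[EINIT] reports failure via RFLAGS.ZF and RAX. */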
	rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF |
					  X86_EFLAGS_AF | X86_EFLAGS_SF |
					  X86_EFLAGS_OF);
	kvm_rax_write(vcpu, ret);
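	/*
	 * ENCLS generates a #UD if SGX1 isn't supported, i.e. this point will
	 * be reached if and only if the SGX1 leafs are enabled.
	 */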
	if (leaf >= ECREATE && leaf <= ETRACK)
		return true;

	if (leaf >= EAUG && leaf <= EMODT)
		return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);
	const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED;
	u32 leaf = (u32)kvm_rax_read(vcpu);
		kvm_inject_gp(vcpu, 0);
		WARN_ONCE(1, "unexpected exit on ENCLS[%u]", leaf);
		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
		vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS;
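	/*
	 * Use Intel's default value for Skylake hardware if Launch Control is
	 * not supported, i.e. Intel's hash is hardcoded into silicon, or if
	 * Launch Control is supported and enabled, i.e. mimic the reset value
	 * and let the guest write the MSRs at will.  If Launch Control is
	 * supported but disabled, then use the current MSR values as the hash
	 * MSRs exist but are read-only (locked and not writable).
	 */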
	if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
	    rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
		sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
		sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
		sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;
		sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL;
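		/* MSR_IA32_SGXLEPUBKEYHASH0 was read by rdmsrl_safe() above. */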
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
		rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
	memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash,
	       sizeof(sgx_pubkey_hash));
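/*
 * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM
 * restrictions if the guest's allowed-1 settings diverge from hardware.
 */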
	struct kvm_cpuid_entry2 *guest_cpuid;
	u32 eax, ebx, ecx, edx;
	if (!vcpu->kvm->arch.sgx_provisioning_allowed)
		return true;
	cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx);
	if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx)
		return true;
	cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx);
	if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx ||
	    guest_cpuid->ecx != ecx || guest_cpuid->edx != edx)
		return true;
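	/*
	 * There is no software enable bit for SGX that is virtualized by
	 * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the
	 * guest (either by the host or by the guest's BIOS) but enabled in
	 * the host, trap all ENCLS leafs and inject #UD/#GP as needed to
	 * emulate the expected system behavior for ENCLS.
	 */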
	bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
	if (sgx_intercept_encls_ecreate(vcpu))
		bitmap |= (1 << ECREATE);
	bitmap &= ~GENMASK_ULL(EMODT, EAUG);
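	/*
	 * Trap and execute EINIT if launch control is enabled in the host
	 * using the guest's values for launch control MSRs, even if the
	 * guest's values are fixed to hardware default values.  The MSRs are
	 * not loaded/saved on VM-Enter/VM-Exit as writing the MSRs is
	 * extraordinarily expensive.
	 */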
	if (boot_cpu_has(X86_FEATURE_SGX_LC))
		bitmap |= (1 << EINIT);
static bool cpu_has_vmx_encls_vmexit(void)
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, u32 function, u32 index)
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool is_guest_mode(struct kvm_vcpu *vcpu)
#define X86EMUL_PROPAGATE_FAULT
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
static struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
static bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)
void setup_default_sgx_lepubkeyhash(void)
bool __read_mostly enable_sgx
void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu)
static int __handle_encls_ecreate(struct kvm_vcpu *vcpu, struct sgx_pageinfo *pageinfo, unsigned long secs_hva, gva_t secs_gva)
int handle_encls(struct kvm_vcpu *vcpu)
static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, int size, int alignment, gva_t *gva)
module_param_named(sgx, enable_sgx, bool, 0444)
static bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu)
static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, gpa_t *gpa)
static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data, unsigned int size)
static u64 sgx_pubkey_hash[4] __ro_after_init
void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
static int handle_encls_einit(struct kvm_vcpu *vcpu)
static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr, unsigned int size)
static bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
u64 msr_ia32_sgxlepubkeyhash[4]
u64 msr_ia32_feature_control
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
static __always_inline void vmcs_write64(unsigned long field, u64 value)
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception)
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception)
int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, u8 ndata)
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
static bool is_paging(struct kvm_vcpu *vcpu)
static bool is_64_bit_mode(struct kvm_vcpu *vcpu)
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)