#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
#include <asm/debugreg.h>

#ifndef CONFIG_KVM_AMD_SEV

#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES

#ifdef CONFIG_KVM_AMD_SEV

#define sev_enabled false
#define sev_es_enabled false
#define sev_es_debug_swap_enabled false
	struct list_head list;

	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return misc_cg_try_charge(type, sev->misc_cg, 1);

	misc_cg_uncharge(type, sev->misc_cg, 1);

	if (min_asid > max_asid)

	sev->misc_cg = get_current_misc_cg();

	mutex_lock(&sev_bitmap_lock);

	if (asid > max_asid) {

	mutex_unlock(&sev_bitmap_lock);

	mutex_unlock(&sev_bitmap_lock);

	mutex_lock(&sev_bitmap_lock);

	for_each_possible_cpu(cpu) {
		sd = per_cpu_ptr(&svm_data, cpu);

	mutex_unlock(&sev_bitmap_lock);

	struct sev_data_decommission decommission;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);

	struct sev_data_deactivate deactivate;

	deactivate.handle = handle;

	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	if (kvm->created_vcpus)

	if (unlikely(sev->active))

	sev->es_active = argp->id == KVM_SEV_ES_INIT;

	ret = sev_platform_init(&argp->error);

	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);

	struct sev_data_activate activate;

	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	ret = sev_issue_cmd_external_user(f.file, id, data, error);
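/*
 * Every SEV, SEV-ES and migration sub-command ultimately funnels through
 * sev_issue_cmd()/__sev_issue_cmd(), which forward the request to the PSP
 * firmware via the CCP driver (sev_issue_cmd_external_user()) using the
 * /dev/sev file descriptor that userspace supplied in
 * struct kvm_sev_cmd.sev_fd.
 */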
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))

	memset(&start, 0, sizeof(start));

	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);

			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;

	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;

	start.handle = params.handle;
	start.policy = params.policy;

	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;
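/*
 * LAUNCH_START creates the guest's firmware context.  The optional
 * dh_uaddr/session_uaddr blobs (the guest owner's Diffie-Hellman certificate
 * and session parameters) are copied from userspace with psp_copy_user_blob()
 * and handed to the PSP by physical address; on success the firmware handle
 * returned in start.handle is stored in kvm_sev_info and reported back to
 * userspace.
 */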
				    unsigned long ulen, unsigned long *n,

	unsigned long npages, size;

	unsigned long locked, lock_limit;

	unsigned long first, last;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);

		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

		return ERR_PTR(-ENOMEM);

	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);

	unpin_user_pages(pages, npinned);
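/*
 * A note on the accounting math above: the pinned range is rounded out to
 * page granularity, so npages = last - first + 1.  For example, a 5 KiB
 * request starting 1 KiB into a 4 KiB page spans two pages: first is
 * uaddr >> PAGE_SHIFT and last is (uaddr + 5K - 1) >> PAGE_SHIFT, i.e.
 * first + 1.  The new locked-page total is checked against RLIMIT_MEMLOCK
 * unless the caller has CAP_IPC_LOCK.
 */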
			     unsigned long npages)

	unpin_user_pages(pages, npages);

	uint8_t *page_virtual;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_local_page(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_local(page_virtual);

					   struct page **inpages, unsigned long npages)

	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	if ((paddr + PAGE_SIZE) == next_paddr) {

	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;

	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))

	vaddr = params.uaddr;

	vaddr_end = vaddr + size;

		return PTR_ERR(inpages);

	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {

		offset = vaddr & (PAGE_SIZE - 1);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);

		next_vaddr = vaddr + len;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
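/*
 * LAUNCH_UPDATE_DATA operates on physical addresses, so the loop above walks
 * the pinned pages and (via get_num_contig_pages()) batches physically
 * contiguous runs into a single firmware command, encrypting and measuring up
 * to (pages * PAGE_SIZE) - offset bytes per iteration.
 */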
	struct sev_es_save_area *save = svm->sev_es.vmsa;

	if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))

	memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));

	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];

	save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];

	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss = svm->vcpu.arch.ia32_xss;
	save->dr6 = svm->vcpu.arch.dr6;

		save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
		pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
			     "This will not work starting with Linux 6.10\n");

	pr_debug("Virtual Machine Save Area (VMSA):\n");
	print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);

	struct sev_data_launch_update_vmsa vmsa;

	if (vcpu->guest_debug) {
		pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");

	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

	vmsa.len = PAGE_SIZE;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);

	vcpu->arch.guest_state_protected = true;
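/*
 * For SEV-ES vCPUs the register state lives in the VMSA rather than the VMCB
 * save area, so sev_es_sync_vmsa() copies the GPRs, XCR0/PKRU/XSS and DR6
 * into the per-vCPU VMSA page, the page is flushed from the cache, and
 * LAUNCH_UPDATE_VMSA encrypts and measures it.  From that point the state is
 * protected and KVM can no longer inspect it, hence
 * guest_state_protected = true.
 */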
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);

		mutex_unlock(&vcpu->mutex);

	void __user *measure = (void __user *)(uintptr_t)argp->data;

	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;

	if (copy_from_user(&params, measure, sizeof(params)))

	memset(&data, 0, sizeof(data));

	p = (void __user *)(uintptr_t)params.uaddr;

	if (params.len > SEV_FW_BLOB_MAX_SIZE)

	blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);

	data.address = __psp_pa(blob);
	data.len = params.len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	if (copy_to_user(p, blob, params.len))

	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))

	struct sev_data_launch_finish data;

	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);

	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))

			       unsigned long dst, int size,
			       int *error, bool enc)

	struct sev_data_dbg data;

			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,

			     unsigned long dst_paddr, int sz, int *err)

	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

				  void __user *dst_uaddr,
				  unsigned long dst_paddr,

	struct page *tpage = NULL;

	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))

				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)

	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;

	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);

	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {

		dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);

		dst_offset = dst_paddr & 15;

			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);

			if (copy_from_user(page_address(dst_tpage) + dst_offset,

		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);

		__free_page(src_tpage);

		__free_page(dst_tpage);
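/*
 * The SEV_CMD_DBG_DECRYPT/ENCRYPT firmware commands require source,
 * destination and length to be 16-byte aligned.  The helpers above therefore
 * round the physical addresses down and the length up, and bounce unaligned
 * user buffers through temporary pages (tpage/src_tpage/dst_tpage) before
 * copying the result back to userspace.
 */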
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)

	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)

	if (!debug.dst_uaddr)

	vaddr = debug.src_uaddr;

	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);

			return PTR_ERR(dst_p);

		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

						 (void __user *)dst_vaddr,

						 (void __user *)vaddr,

						 (void __user *)dst_vaddr,

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);

		return PTR_ERR(pages);

		goto e_unpin_memory;

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);

	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);

		ret = PTR_ERR(blob);
		goto e_unpin_memory;

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);

	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);

	void __user *report = (void __user *)(uintptr_t)argp->data;

	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))

	memset(&data, 0, sizeof(data));

	p = (void __user *)(uintptr_t)params.uaddr;

	if (params.len > SEV_FW_BLOB_MAX_SIZE)

	blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);

	data.address = __psp_pa(blob);
	data.len = params.len;
	memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);

	if (copy_to_user(p, blob, params.len))

	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
					    struct kvm_sev_send_start *params)

	struct sev_data_send_start data;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))

	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))

	if (!params.session_len)

	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)

	session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);

	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;

	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {

		goto e_free_amd_cert;

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))

	kfree(session_data);
					    struct kvm_sev_send_update_data *params)

	struct sev_data_send_update_data data;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))

	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))

	if (!params.trans_len || !params.hdr_len)

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)

	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);

	trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;

	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

		goto e_free_trans_data;

	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {

		goto e_free_trans_data;

	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,

	struct sev_data_send_finish data;

	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);

	struct sev_data_send_cancel data;

	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))

	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

		goto e_free_session;

		goto e_free_session;

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_receive_start))) {

		goto e_free_session;

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

	kfree(session_data);

	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);

		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);

	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;

	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	struct sev_data_receive_finish data;

	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
	    cmd_id == KVM_SEV_DBG_ENCRYPT)

	if (dst_kvm == src_kvm)

	if (mutex_lock_killable(&dst_kvm->lock))

	if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))

	mutex_unlock(&dst_kvm->lock);

	mutex_unlock(&dst_kvm->lock);
	mutex_unlock(&src_kvm->lock);

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable_nested(&vcpu->mutex, role))

#ifdef CONFIG_PROVE_LOCKING

			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);

	kvm_for_each_vcpu(j, vcpu, kvm) {

#ifdef CONFIG_PROVE_LOCKING

		mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);

		mutex_unlock(&vcpu->mutex);

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {

		mutex_acquire(&vcpu->mutex.dep_map,

		mutex_unlock(&vcpu->mutex);

	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	struct vcpu_svm *dst_svm, *src_svm;

	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
		dst_svm = to_svm(dst_vcpu);

		src_vcpu = kvm_get_vcpu(src_kvm, i);
		src_svm = to_svm(src_vcpu);

		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
		dst_vcpu->arch.guest_state_protected = true;

		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
		src_vcpu->arch.guest_state_protected = false;

	struct kvm_vcpu *src_vcpu;

	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))

	kvm_for_each_vcpu(i, src_vcpu, src) {
		if (!src_vcpu->arch.guest_state_protected)

	struct fd f = fdget(source_fd);
	struct kvm *source_kvm;
	bool charged = false;

	source_kvm = f.file->private_data;

	dst_sev->misc_cg = get_current_misc_cg();
	cg_cleanup_sev = dst_sev;

		goto out_dst_cgroup;

		goto out_dst_cgroup;

		goto out_source_vcpu;

	kvm_vm_dead(source_kvm);
	cg_cleanup_sev = src_sev;

		put_misc_cg(cg_cleanup_sev->misc_cg);
		cg_cleanup_sev->misc_cg = NULL;
	struct kvm_sev_cmd sev_cmd;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:

	case KVM_SEV_LAUNCH_START:

	case KVM_SEV_LAUNCH_UPDATE_DATA:

	case KVM_SEV_LAUNCH_UPDATE_VMSA:

	case KVM_SEV_LAUNCH_MEASURE:

	case KVM_SEV_LAUNCH_FINISH:

	case KVM_SEV_GUEST_STATUS:

	case KVM_SEV_DBG_DECRYPT:

	case KVM_SEV_DBG_ENCRYPT:

	case KVM_SEV_LAUNCH_SECRET:

	case KVM_SEV_GET_ATTESTATION_REPORT:

	case KVM_SEV_SEND_START:

	case KVM_SEV_SEND_UPDATE_DATA:

	case KVM_SEV_SEND_FINISH:

	case KVM_SEV_SEND_CANCEL:

	case KVM_SEV_RECEIVE_START:

	case KVM_SEV_RECEIVE_UPDATE_DATA:

	case KVM_SEV_RECEIVE_FINISH:

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))

	mutex_unlock(&kvm->lock);
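/*
 * The dispatcher above is reached from userspace through the
 * KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor: struct kvm_sev_cmd
 * carries the sub-command id, a pointer to its argument struct, the /dev/sev
 * fd, and (on return) the PSP error code.  The block below is an illustrative
 * userspace sketch of that flow, not kernel code; the helpers sev_cmd() and
 * sev_launch() are hypothetical.
 */
#if 0	/* illustrative userspace sketch -- not part of sev.c */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: wrap one KVM_MEMORY_ENCRYPT_OP sub-command. */
static int sev_cmd(int vm_fd, int sev_fd, uint32_t id, void *data)
{
	struct kvm_sev_cmd cmd = {
		.id = id,
		.data = (uint64_t)(uintptr_t)data,
		.sev_fd = sev_fd,
	};

	/* Returns 0 on success; cmd.error then holds the PSP status code. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}

/* Hypothetical helper: begin a plain-SEV launch with a default policy. */
static int sev_launch(int vm_fd, int sev_fd)
{
	struct kvm_sev_launch_start start = { .policy = 0 };

	if (sev_cmd(vm_fd, sev_fd, KVM_SEV_INIT, NULL))
		return -1;
	if (sev_cmd(vm_fd, sev_fd, KVM_SEV_LAUNCH_START, &start))
		return -1;
	/*
	 * KVM_SEV_LAUNCH_UPDATE_DATA would follow for each guest memory
	 * region, then KVM_SEV_LAUNCH_MEASURE and KVM_SEV_LAUNCH_FINISH.
	 */
	return 0;
}
#endif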
				   struct kvm_enc_region *range)

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);

	mutex_lock(&kvm->lock);

	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_unlock(&kvm->lock);

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)

	list_del(&region->list);

				     struct kvm_enc_region *range)

	mutex_lock(&kvm->lock);

	wbinvd_on_all_cpus();

	mutex_unlock(&kvm->lock);

	mutex_unlock(&kvm->lock);

	struct fd f = fdget(source_fd);
	struct kvm *source_kvm;

	source_kvm = f.file->private_data;

	mirror_sev->active = true;
	mirror_sev->asid = source_sev->asid;
	mirror_sev->fd = source_sev->fd;

	struct list_head *pos, *q;

	mutex_lock(&owner_kvm->lock);

	mutex_unlock(&owner_kvm->lock);

	wbinvd_on_all_cpus();

	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
#ifdef CONFIG_KVM_AMD_SEV
	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
	bool sev_es_supported = false;
	bool sev_supported = false;

	if (!boot_cpu_has(X86_FEATURE_SEV) ||
	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))

	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	sev_supported = true;

	if (!boot_cpu_has(X86_FEATURE_SEV_ES))

	sev_es_supported = true;

	if (boot_cpu_has(X86_FEATURE_SEV))
		pr_info("SEV %s (ASIDs %u - %u)\n",

	if (boot_cpu_has(X86_FEATURE_SEV_ES))
		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
			sev_es_supported ? "enabled" : "disabled",

	if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
	    !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
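/*
 * CPUID leaf 0x8000001f reports the total number of encrypted-guest ASIDs in
 * ECX and the minimum ASID usable for a plain SEV guest in EDX; ASIDs below
 * that minimum are reserved for SEV-ES guests, which is where the separate
 * SEV and SEV-ES ASID ranges printed above come from.
 */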
	unsigned long addr = (unsigned long)va;

	if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
		clflush_cache_range(va, PAGE_SIZE);

	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))

	wbinvd_on_all_cpus();

	wbinvd_on_all_cpus();

	if (vcpu->arch.guest_state_protected)

	pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);

	struct kvm_vcpu *vcpu = &svm->vcpu;
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);

	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap,
	       sizeof(ghcb->save.valid_bitmap));

	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);

	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);

	if (kvm_ghcb_xcr0_is_valid(svm)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);

	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);

	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));

	return (((u64)control->exit_code_hi) << 32) | control->exit_code;

	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;

		reason = GHCB_ERR_INVALID_USAGE;

	reason = GHCB_ERR_MISSING_INPUT;

	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:

	case SVM_EXIT_WRITE_DR7:
		if (!kvm_ghcb_rax_is_valid(svm))

	case SVM_EXIT_RDTSC:

	case SVM_EXIT_RDPMC:
		if (!kvm_ghcb_rcx_is_valid(svm))

	case SVM_EXIT_CPUID:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))

		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
			if (!kvm_ghcb_xcr0_is_valid(svm))

		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
			if (!kvm_ghcb_sw_scratch_is_valid(svm))

			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
				if (!kvm_ghcb_rax_is_valid(svm))

		if (!kvm_ghcb_rcx_is_valid(svm))

			if (!kvm_ghcb_rax_is_valid(svm) ||
			    !kvm_ghcb_rdx_is_valid(svm))

	case SVM_EXIT_VMMCALL:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_cpl_is_valid(svm))

	case SVM_EXIT_RDTSCP:

	case SVM_EXIT_WBINVD:

	case SVM_EXIT_MONITOR:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm) ||
		    !kvm_ghcb_rdx_is_valid(svm))

	case SVM_EXIT_MWAIT:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))

	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!kvm_ghcb_sw_scratch_is_valid(svm))

	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:

		reason = GHCB_ERR_INVALID_EVENT;

	if (reason == GHCB_ERR_INVALID_USAGE) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",

	} else if (reason == GHCB_ERR_INVALID_EVENT) {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",

		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, reason);
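/*
 * The switch above enforces the GHCB protocol's "state to save" requirements:
 * for each exit code the guest must have marked the registers that KVM needs
 * (RAX/RCX/RDX, CPL, the scratch GPA, ...) as valid in the GHCB's
 * valid_bitmap.  Anything missing is reported back to the guest through
 * sw_exit_info_1/2 rather than being guessed by the hypervisor.
 */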
	    svm->vcpu.arch.last_vmentry_cpu == cpu)

	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;

#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)

	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;

	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {

		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_0xff0);

		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);

		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);

		pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",

		scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);

			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);

	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;

	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;

	svm->vmcb->control.ghcb_gpa = value;
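/*
 * set_ghcb_msr_bits() and get_ghcb_msr_bits() above treat the GHCB MSR value
 * as a packed bitfield: clear "mask << pos", OR in the new field, then shift
 * and mask to read it back.  The block below is a stand-alone illustration of
 * the same packing with made-up FIELD_MASK/FIELD_POS constants, not the
 * kernel's GHCB_MSR_* definitions.
 */
#if 0	/* illustrative only */
#define FIELD_MASK	0xfffULL	/* a 12-bit field ...     */
#define FIELD_POS	12		/* ... starting at bit 12 */

static inline u64 field_set(u64 msr, u64 value)
{
	msr &= ~(FIELD_MASK << FIELD_POS);		/* clear the field   */
	return msr | ((value & FIELD_MASK) << FIELD_POS);
}

static inline u64 field_get(u64 msr)
{
	return (msr >> FIELD_POS) & FIELD_MASK;		/* extract the field */
}
#endif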
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,

	switch (ghcb_info) {
	case GHCB_MSR_SEV_INFO_REQ:

	case GHCB_MSR_CPUID_REQ: {

					    GHCB_MSR_CPUID_FUNC_MASK,
					    GHCB_MSR_CPUID_FUNC_POS);

		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

					     GHCB_MSR_CPUID_REG_MASK,
					     GHCB_MSR_CPUID_REG_POS);

			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];

			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];

			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];

			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

				  GHCB_MSR_CPUID_VALUE_MASK,
				  GHCB_MSR_CPUID_VALUE_POS);

	case GHCB_MSR_TERM_REQ: {
		u64 reason_set, reason_code;

						 GHCB_MSR_TERM_REASON_SET_MASK,
						 GHCB_MSR_TERM_REASON_SET_POS);

						  GHCB_MSR_TERM_REASON_MASK,
						  GHCB_MSR_TERM_REASON_POS);
		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
			reason_set, reason_code);

		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
		vcpu->run->system_event.ndata = 1;
		vcpu->run->system_event.data[0] = control->ghcb_gpa;

	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,

	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;

	if (ghcb_gpa & GHCB_MSR_INFO_MASK)

		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");

		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",

	trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);

	ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:

	case SVM_VMGEXIT_MMIO_WRITE:

	case SVM_VMGEXIT_NMI_COMPLETE:
		++vcpu->stat.nmi_window_exits;

		kvm_make_request(KVM_REQ_EVENT, vcpu);

	case SVM_VMGEXIT_AP_HLT_LOOP:

	case SVM_VMGEXIT_AP_JUMP_TABLE: {

		switch (control->exit_info_1) {

			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",

			ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
			ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);

	case SVM_VMGEXIT_UNSUPPORTED_EVENT:

			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",

	if (svm->vmcb->control.exit_info_2 > INT_MAX)

	count = svm->vmcb->control.exit_info_2;
	if (unlikely(check_mul_overflow(count, size, &bytes)))

	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {

	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct kvm_cpuid_entry2 *best;

		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));

	struct kvm_vcpu *vcpu = &svm->vcpu;

	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;

	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	hostsa->pkru = read_pkru();

	hostsa->dr0 = native_get_debugreg(0);
	hostsa->dr1 = native_get_debugreg(1);
	hostsa->dr2 = native_get_debugreg(2);
	hostsa->dr3 = native_get_debugreg(3);
	hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
	hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
	hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
	hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
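/*
 * For SEV-ES, VMEXIT loads some host state (XCR0, PKRU, XSS, and with
 * DebugSwap the debug registers and their address masks) from the host save
 * area, but VMRUN does not save it there.  sev_es_prepare_switch_to_guest()
 * therefore records that state manually, as shown above, so it is restored
 * correctly on the next #VMEXIT.
 */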
	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);