KVM
svm.h File Reference
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>
#include <asm/svm.h>
#include <asm/sev-common.h>
#include "cpuid.h"
#include "kvm_cache_regs.h"

Classes

struct  kvm_sev_info
 
struct  kvm_svm
 
struct  kvm_vmcb_info
 
struct  vmcb_save_area_cached
 
struct  vmcb_ctrl_area_cached
 
struct  svm_nested_state
 
struct  vcpu_sev_es_state
 
struct  vcpu_svm
 
struct  svm_cpu_data
 

Macros

#define __sme_page_pa(x)   __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
#define IOPM_SIZE   PAGE_SIZE * 3
 
#define MSRPM_SIZE   PAGE_SIZE * 2
 
#define MAX_DIRECT_ACCESS_MSRS   47
 
#define MSRPM_OFFSETS   32
 
#define VMCB_ALL_CLEAN_MASK
 
#define VMCB_ALWAYS_DIRTY_MASK   ((1U << VMCB_INTR) | (1U << VMCB_CR2))
 
#define SVM_REGS_LAZY_LOAD_SET   (1 << VCPU_EXREG_PDPTR)
 
#define MSR_INVALID   0xffffffffU
 
#define DEBUGCTL_RESERVED_BITS   (~(0x3fULL))
 
#define NESTED_EXIT_HOST   0 /* Exit handled on host level */
 
#define NESTED_EXIT_DONE   1 /* Exit caused nested vmexit */
 
#define NESTED_EXIT_CONTINUE   2 /* Further checks needed */
 
#define AVIC_REQUIRED_APICV_INHIBITS
 
#define GHCB_VERSION_MAX   1ULL
 
#define GHCB_VERSION_MIN   1ULL
 
#define DEFINE_KVM_GHCB_ACCESSORS(field)
 

Enumerations

enum  {
  VMCB_INTERCEPTS , VMCB_PERM_MAP , VMCB_ASID , VMCB_INTR ,
  VMCB_NPT , VMCB_CR , VMCB_DR , VMCB_DT ,
  VMCB_SEG , VMCB_CR2 , VMCB_LBR , VMCB_AVIC ,
  VMCB_SW = 31
}
 

Functions

 DECLARE_PER_CPU (struct svm_cpu_data, svm_data)
 
void recalc_intercepts (struct vcpu_svm *svm)
 
static __always_inline struct kvm_svm * to_kvm_svm (struct kvm *kvm)
 
static __always_inline bool sev_guest (struct kvm *kvm)
 
static __always_inline bool sev_es_guest (struct kvm *kvm)
 
static void vmcb_mark_all_dirty (struct vmcb *vmcb)
 
static void vmcb_mark_all_clean (struct vmcb *vmcb)
 
static void vmcb_mark_dirty (struct vmcb *vmcb, int bit)
 
static bool vmcb_is_dirty (struct vmcb *vmcb, int bit)
 
static __always_inline struct vcpu_svm * to_svm (struct kvm_vcpu *vcpu)
 
static void vmcb_set_intercept (struct vmcb_control_area *control, u32 bit)
 
static void vmcb_clr_intercept (struct vmcb_control_area *control, u32 bit)
 
static bool vmcb_is_intercept (struct vmcb_control_area *control, u32 bit)
 
static bool vmcb12_is_intercept (struct vmcb_ctrl_area_cached *control, u32 bit)
 
static void set_exception_intercept (struct vcpu_svm *svm, u32 bit)
 
static void clr_exception_intercept (struct vcpu_svm *svm, u32 bit)
 
static void svm_set_intercept (struct vcpu_svm *svm, int bit)
 
static void svm_clr_intercept (struct vcpu_svm *svm, int bit)
 
static bool svm_is_intercept (struct vcpu_svm *svm, int bit)
 
static bool nested_vgif_enabled (struct vcpu_svm *svm)
 
static struct vmcb * get_vgif_vmcb (struct vcpu_svm *svm)
 
static void enable_gif (struct vcpu_svm *svm)
 
static void disable_gif (struct vcpu_svm *svm)
 
static bool gif_set (struct vcpu_svm *svm)
 
static bool nested_npt_enabled (struct vcpu_svm *svm)
 
static bool nested_vnmi_enabled (struct vcpu_svm *svm)
 
static bool is_x2apic_msrpm_offset (u32 offset)
 
static struct vmcb * get_vnmi_vmcb_l1 (struct vcpu_svm *svm)
 
static bool is_vnmi_enabled (struct vcpu_svm *svm)
 
u32 svm_msrpm_offset (u32 msr)
 
u32 * svm_vcpu_alloc_msrpm (void)
 
void svm_vcpu_init_msrpm (struct kvm_vcpu *vcpu, u32 *msrpm)
 
void svm_vcpu_free_msrpm (u32 *msrpm)
 
void svm_copy_lbrs (struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 
void svm_update_lbrv (struct kvm_vcpu *vcpu)
 
int svm_set_efer (struct kvm_vcpu *vcpu, u64 efer)
 
void svm_set_cr0 (struct kvm_vcpu *vcpu, unsigned long cr0)
 
void svm_set_cr4 (struct kvm_vcpu *vcpu, unsigned long cr4)
 
void disable_nmi_singlestep (struct vcpu_svm *svm)
 
bool svm_smi_blocked (struct kvm_vcpu *vcpu)
 
bool svm_nmi_blocked (struct kvm_vcpu *vcpu)
 
bool svm_interrupt_blocked (struct kvm_vcpu *vcpu)
 
void svm_set_gif (struct vcpu_svm *svm, bool value)
 
int svm_invoke_exit_handler (struct kvm_vcpu *vcpu, u64 exit_code)
 
void set_msr_interception (struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
 
void svm_set_x2apic_msr_interception (struct vcpu_svm *svm, bool disable)
 
void svm_complete_interrupt_delivery (struct kvm_vcpu *vcpu, int delivery_mode, int trig_mode, int vec)
 
static bool nested_svm_virtualize_tpr (struct kvm_vcpu *vcpu)
 
static bool nested_exit_on_smi (struct vcpu_svm *svm)
 
static bool nested_exit_on_intr (struct vcpu_svm *svm)
 
static bool nested_exit_on_nmi (struct vcpu_svm *svm)
 
int enter_svm_guest_mode (struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun)
 
void svm_leave_nested (struct kvm_vcpu *vcpu)
 
void svm_free_nested (struct vcpu_svm *svm)
 
int svm_allocate_nested (struct vcpu_svm *svm)
 
int nested_svm_vmrun (struct kvm_vcpu *vcpu)
 
void svm_copy_vmrun_state (struct vmcb_save_area *to_save, struct vmcb_save_area *from_save)
 
void svm_copy_vmloadsave_state (struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 
int nested_svm_vmexit (struct vcpu_svm *svm)
 
static int nested_svm_simple_vmexit (struct vcpu_svm *svm, u32 exit_code)
 
int nested_svm_exit_handled (struct vcpu_svm *svm)
 
int nested_svm_check_permissions (struct kvm_vcpu *vcpu)
 
int nested_svm_check_exception (struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code)
 
int nested_svm_exit_special (struct vcpu_svm *svm)
 
void nested_svm_update_tsc_ratio_msr (struct kvm_vcpu *vcpu)
 
void svm_write_tsc_multiplier (struct kvm_vcpu *vcpu)
 
void nested_copy_vmcb_control_to_cache (struct vcpu_svm *svm, struct vmcb_control_area *control)
 
void nested_copy_vmcb_save_to_cache (struct vcpu_svm *svm, struct vmcb_save_area *save)
 
void nested_sync_control_from_vmcb02 (struct vcpu_svm *svm)
 
void nested_vmcb02_compute_g_pat (struct vcpu_svm *svm)
 
void svm_switch_vmcb (struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
 
bool avic_hardware_setup (void)
 
int avic_ga_log_notifier (u32 ga_tag)
 
void avic_vm_destroy (struct kvm *kvm)
 
int avic_vm_init (struct kvm *kvm)
 
void avic_init_vmcb (struct vcpu_svm *svm, struct vmcb *vmcb)
 
int avic_incomplete_ipi_interception (struct kvm_vcpu *vcpu)
 
int avic_unaccelerated_access_interception (struct kvm_vcpu *vcpu)
 
int avic_init_vcpu (struct vcpu_svm *svm)
 
void avic_vcpu_load (struct kvm_vcpu *vcpu, int cpu)
 
void avic_vcpu_put (struct kvm_vcpu *vcpu)
 
void avic_apicv_post_state_restore (struct kvm_vcpu *vcpu)
 
void avic_refresh_apicv_exec_ctrl (struct kvm_vcpu *vcpu)
 
int avic_pi_update_irte (struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set)
 
void avic_vcpu_blocking (struct kvm_vcpu *vcpu)
 
void avic_vcpu_unblocking (struct kvm_vcpu *vcpu)
 
void avic_ring_doorbell (struct kvm_vcpu *vcpu)
 
unsigned long avic_vcpu_get_apicv_inhibit_reasons (struct kvm_vcpu *vcpu)
 
void avic_refresh_virtual_apic_mode (struct kvm_vcpu *vcpu)
 
void sev_vm_destroy (struct kvm *kvm)
 
int sev_mem_enc_ioctl (struct kvm *kvm, void __user *argp)
 
int sev_mem_enc_register_region (struct kvm *kvm, struct kvm_enc_region *range)
 
int sev_mem_enc_unregister_region (struct kvm *kvm, struct kvm_enc_region *range)
 
int sev_vm_copy_enc_context_from (struct kvm *kvm, unsigned int source_fd)
 
int sev_vm_move_enc_context_from (struct kvm *kvm, unsigned int source_fd)
 
void sev_guest_memory_reclaimed (struct kvm *kvm)
 
void pre_sev_run (struct vcpu_svm *svm, int cpu)
 
void __init sev_set_cpu_caps (void)
 
void __init sev_hardware_setup (void)
 
void sev_hardware_unsetup (void)
 
int sev_cpu_init (struct svm_cpu_data *sd)
 
void sev_init_vmcb (struct vcpu_svm *svm)
 
void sev_vcpu_after_set_cpuid (struct vcpu_svm *svm)
 
void sev_free_vcpu (struct kvm_vcpu *vcpu)
 
int sev_handle_vmgexit (struct kvm_vcpu *vcpu)
 
int sev_es_string_io (struct vcpu_svm *svm, int size, unsigned int port, int in)
 
void sev_es_vcpu_reset (struct vcpu_svm *svm)
 
void sev_vcpu_deliver_sipi_vector (struct kvm_vcpu *vcpu, u8 vector)
 
void sev_es_prepare_switch_to_guest (struct sev_es_save_area *hostsa)
 
void sev_es_unmap_ghcb (struct vcpu_svm *svm)
 
void __svm_sev_es_vcpu_run (struct vcpu_svm *svm, bool spec_ctrl_intercepted)
 
void __svm_vcpu_run (struct vcpu_svm *svm, bool spec_ctrl_intercepted)
 

Variables

u32 msrpm_offsets [MSRPM_OFFSETS] __read_mostly
 
bool npt_enabled
 
int nrips
 
int vgif
 
bool intercept_smi
 
bool x2avic_enabled
 
bool vnmi
 
bool dump_invalid_vmcb
 
struct kvm_x86_nested_ops svm_nested_ops
 
unsigned int max_sev_asid
 

Macro Definition Documentation

◆ __sme_page_pa

#define __sme_page_pa (   x)    __sme_set(page_to_pfn(x) << PAGE_SHIFT)

Definition at line 28 of file svm.h.
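A short, hedged usage sketch (illustrative only, not reproduced from this header): the macro turns a struct page into a physical address carrying the SME C-bit, which is the form SVM control fields expect. The allocation below is a hypothetical stand-in for any page-backed permission map.

    /* Illustrative sketch: hand hardware an SME-tagged physical address. */
    struct page *pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(IOPM_SIZE));
    unsigned long iopm_base = pages ? __sme_page_pa(pages) : 0;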

◆ AVIC_REQUIRED_APICV_INHIBITS

#define AVIC_REQUIRED_APICV_INHIBITS
Value:
( \
BIT(APICV_INHIBIT_REASON_DISABLE) | \
BIT(APICV_INHIBIT_REASON_ABSENT) | \
BIT(APICV_INHIBIT_REASON_HYPERV) | \
BIT(APICV_INHIBIT_REASON_NESTED) | \
BIT(APICV_INHIBIT_REASON_IRQWIN) | \
BIT(APICV_INHIBIT_REASON_PIT_REINJ) | \
BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
BIT(APICV_INHIBIT_REASON_SEV) | \
BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) | \
BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) \
)

Definition at line 628 of file svm.h.

◆ DEBUGCTL_RESERVED_BITS

#define DEBUGCTL_RESERVED_BITS   (~(0x3fULL))

Definition at line 537 of file svm.h.

◆ DEFINE_KVM_GHCB_ACCESSORS

#define DEFINE_KVM_GHCB_ACCESSORS (   field)
Value:
static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
{ \
return test_bit(GHCB_BITMAP_IDX(field), \
(unsigned long *)&svm->sev_es.valid_bitmap); \
} \
\
static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
{ \
return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
} \
Definition: svm.h:209

Definition at line 703 of file svm.h.
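For illustration only (hedged; the expansion below is paraphrased, not copied from the header): instantiating the macro for a GHCB save-area field such as sw_exit_code is expected to generate a validity test plus a guarded getter whose names are derived from the field:

    DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)

    /* ...which roughly provides: */
    static __always_inline bool kvm_ghcb_sw_exit_code_is_valid(const struct vcpu_svm *svm);
    static __always_inline u64 kvm_ghcb_get_sw_exit_code_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb);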

◆ GHCB_VERSION_MAX

#define GHCB_VERSION_MAX   1ULL

Definition at line 667 of file svm.h.

◆ GHCB_VERSION_MIN

#define GHCB_VERSION_MIN   1ULL

Definition at line 668 of file svm.h.

◆ IOPM_SIZE

#define IOPM_SIZE   PAGE_SIZE * 3

Definition at line 30 of file svm.h.

◆ MAX_DIRECT_ACCESS_MSRS

#define MAX_DIRECT_ACCESS_MSRS   47

Definition at line 33 of file svm.h.

◆ MSR_INVALID

#define MSR_INVALID   0xffffffffU

Definition at line 535 of file svm.h.

◆ MSRPM_OFFSETS

#define MSRPM_OFFSETS   32

Definition at line 34 of file svm.h.

◆ MSRPM_SIZE

#define MSRPM_SIZE   PAGE_SIZE * 2

Definition at line 31 of file svm.h.
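For context, a hedged sketch of how the permission map is indexed: each MSR owns two adjacent intercept bits (read, then write), so one u32 covers 16 MSRs, and svm_msrpm_offset() (declared below) maps an MSR number to its u32 index, or MSR_INVALID when the MSR lies outside the mapped ranges. The helper below is illustrative only and not part of this header.

    /* Illustrative sketch: is a write to 'msr' intercepted in this msrpm? */
    static bool msr_write_intercepted_sketch(u32 *msrpm, u32 msr)
    {
            u32 offset = svm_msrpm_offset(msr);     /* u32 index, or MSR_INVALID */

            if (offset == MSR_INVALID)
                    return true;                    /* MSRs outside the map are always intercepted */

            return msrpm[offset] & BIT(2 * (msr & 0x0f) + 1);   /* write bit; the read bit is 2 * (msr & 0x0f) */
    }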

◆ NESTED_EXIT_CONTINUE

#define NESTED_EXIT_CONTINUE   2 /* Further checks needed */

Definition at line 567 of file svm.h.

◆ NESTED_EXIT_DONE

#define NESTED_EXIT_DONE   1 /* Exit caused nested vmexit */

Definition at line 566 of file svm.h.

◆ NESTED_EXIT_HOST

#define NESTED_EXIT_HOST   0 /* Exit handled on host level */

Definition at line 565 of file svm.h.

◆ SVM_REGS_LAZY_LOAD_SET

#define SVM_REGS_LAZY_LOAD_SET   (1 << VCPU_EXREG_PDPTR)

Definition at line 377 of file svm.h.

◆ VMCB_ALL_CLEAN_MASK

#define VMCB_ALL_CLEAN_MASK
Value:
( \
(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \
(1U << VMCB_ASID) | (1U << VMCB_INTR) | \
(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
(1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
(1U << VMCB_SW))
@ VMCB_PERM_MAP
Definition: svm.h:51
@ VMCB_DT
Definition: svm.h:57
@ VMCB_AVIC
Definition: svm.h:61
@ VMCB_INTR
Definition: svm.h:53
@ VMCB_CR
Definition: svm.h:55
@ VMCB_NPT
Definition: svm.h:54
@ VMCB_LBR
Definition: svm.h:60
@ VMCB_INTERCEPTS
Definition: svm.h:49
@ VMCB_SW
Definition: svm.h:65
@ VMCB_SEG
Definition: svm.h:58
@ VMCB_CR2
Definition: svm.h:59
@ VMCB_DR
Definition: svm.h:56
@ VMCB_ASID
Definition: svm.h:52

Definition at line 68 of file svm.h.

◆ VMCB_ALWAYS_DIRTY_MASK

#define VMCB_ALWAYS_DIRTY_MASK   ((1U << VMCB_INTR) | (1U << VMCB_CR2))

Definition at line 77 of file svm.h.
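Taken together, the two masks express the clean-bit protocol: a VMCB can be marked clean only for field groups that hardware is not forced to reload, so the interrupt-control and CR2 groups are always excluded. A minimal sketch of that relationship (this mirrors what vmcb_mark_all_clean() is expected to compute):

    /* Sketch: mark every tracked group clean except the always-dirty ones. */
    vmcb->control.clean = VMCB_ALL_CLEAN_MASK & ~VMCB_ALWAYS_DIRTY_MASK;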

Enumeration Type Documentation

◆ anonymous enum

anonymous enum
Enumerator
VMCB_INTERCEPTS 
VMCB_PERM_MAP 
VMCB_ASID 
VMCB_INTR 
VMCB_NPT 
VMCB_CR 
VMCB_DR 
VMCB_DT 
VMCB_SEG 
VMCB_CR2 
VMCB_LBR 
VMCB_AVIC 
VMCB_SW 

Definition at line 48 of file svm.h.

48  {
49  VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
50  pause filter count */
51  VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */
52  VMCB_ASID, /* ASID */
53  VMCB_INTR, /* int_ctl, int_vector */
54  VMCB_NPT, /* npt_en, nCR3, gPAT */
55  VMCB_CR, /* CR0, CR3, CR4, EFER */
56  VMCB_DR, /* DR6, DR7 */
57  VMCB_DT, /* GDT, IDT */
58  VMCB_SEG, /* CS, DS, SS, ES, CPL */
59  VMCB_CR2, /* CR2 only */
60  VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
61  VMCB_AVIC, /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
62  * AVIC PHYSICAL_TABLE pointer,
63  * AVIC LOGICAL_TABLE pointer
64  */
65  VMCB_SW = 31, /* Reserved for hypervisor/software use */
66 };
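Each enumerator selects one clean-bit group in vmcb->control.clean; software clears the matching bit whenever it modifies fields in that group so the CPU re-reads them on the next VMRUN. A hedged usage sketch (the value written below is a placeholder):

    /* Sketch: after touching CR state cached in the VMCB, flag the CR group. */
    svm->vmcb->save.cr0 = cr0;              /* 'cr0' is a placeholder value */
    vmcb_mark_dirty(svm->vmcb, VMCB_CR);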

Function Documentation

◆ __svm_sev_es_vcpu_run()

void __svm_sev_es_vcpu_run ( struct vcpu_svm *  svm,
bool  spec_ctrl_intercepted 
)

◆ __svm_vcpu_run()

void __svm_vcpu_run ( struct vcpu_svm *  svm,
bool  spec_ctrl_intercepted 
)

◆ avic_apicv_post_state_restore()

void avic_apicv_post_state_restore ( struct kvm_vcpu *  vcpu)

Definition at line 738 of file avic.c.

739 {
740  avic_handle_dfr_update(vcpu);
741  avic_handle_ldr_update(vcpu);
742 }
static void avic_handle_ldr_update(struct kvm_vcpu *vcpu)
Definition: avic.c:610
static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
Definition: avic.c:629

◆ avic_ga_log_notifier()

int avic_ga_log_notifier ( u32  ga_tag)

Definition at line 143 of file avic.c.

144 {
145  unsigned long flags;
146  struct kvm_svm *kvm_svm;
147  struct kvm_vcpu *vcpu = NULL;
148  u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
149  u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
150 
151  pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
152  trace_kvm_avic_ga_log(vm_id, vcpu_id);
153 
154  spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
155  hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
156  if (kvm_svm->avic_vm_id != vm_id)
157  continue;
158  vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
159  break;
160  }
161  spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
162 
163  /* Note:
164  * At this point, the IOMMU should have already set the pending
165  * bit in the vAPIC backing page. So, we just need to schedule
166  * in the vcpu.
167  */
168  if (vcpu)
169  kvm_vcpu_wake_up(vcpu);
170 
171  return 0;
172 }
#define AVIC_GATAG_TO_VCPUID(x)
Definition: avic.c:47
#define AVIC_GATAG_TO_VMID(x)
Definition: avic.c:46
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3915
Definition: svm.h:95
u32 avic_vm_id
Definition: svm.h:99
struct kvm kvm
Definition: svm.h:96
uint32_t flags
Definition: xen.c:1

◆ avic_hardware_setup()

bool avic_hardware_setup ( void  )

Definition at line 1188 of file avic.c.

1189 {
1190  if (!npt_enabled)
1191  return false;
1192 
1193  /* AVIC is a prerequisite for x2AVIC. */
1194  if (!boot_cpu_has(X86_FEATURE_AVIC) && !force_avic) {
1195  if (boot_cpu_has(X86_FEATURE_X2AVIC)) {
1196  pr_warn(FW_BUG "Cannot support x2AVIC due to AVIC is disabled");
1197  pr_warn(FW_BUG "Try enable AVIC using force_avic option");
1198  }
1199  return false;
1200  }
1201 
1202  if (boot_cpu_has(X86_FEATURE_AVIC)) {
1203  pr_info("AVIC enabled\n");
1204  } else if (force_avic) {
1205  /*
1206  * Some older systems does not advertise AVIC support.
1207  * See Revision Guide for specific AMD processor for more detail.
1208  */
1209  pr_warn("AVIC is not supported in CPUID but force enabled");
1210  pr_warn("Your system might crash and burn");
1211  }
1212 
1213  /* AVIC is a prerequisite for x2AVIC. */
1214  x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);
1215  if (x2avic_enabled)
1216  pr_info("x2AVIC enabled\n");
1217 
1218  amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
1219 
1220  return true;
1221 }
static bool force_avic
Definition: avic.c:60
bool x2avic_enabled
Definition: avic.c:75
int avic_ga_log_notifier(u32 ga_tag)
Definition: avic.c:143
bool npt_enabled
Definition: svm.c:198

◆ avic_incomplete_ipi_interception()

int avic_incomplete_ipi_interception ( struct kvm_vcpu *  vcpu)

Definition at line 490 of file avic.c.

491 {
492  struct vcpu_svm *svm = to_svm(vcpu);
493  u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
494  u32 icrl = svm->vmcb->control.exit_info_1;
495  u32 id = svm->vmcb->control.exit_info_2 >> 32;
496  u32 index = svm->vmcb->control.exit_info_2 & 0x1FF;
497  struct kvm_lapic *apic = vcpu->arch.apic;
498 
499  trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
500 
501  switch (id) {
502  case AVIC_IPI_FAILURE_INVALID_TARGET:
503  case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
504  /*
505  * Emulate IPIs that are not handled by AVIC hardware, which
506  * only virtualizes Fixed, Edge-Triggered INTRs, and falls over
507  * if _any_ targets are invalid, e.g. if the logical mode mask
508  * is a superset of running vCPUs.
509  *
510  * The exit is a trap, e.g. ICR holds the correct value and RIP
511  * has been advanced, KVM is responsible only for emulating the
512  * IPI. Sadly, hardware may sometimes leave the BUSY flag set,
513  * in which case KVM needs to emulate the ICR write as well in
514  * order to clear the BUSY flag.
515  */
516  if (icrl & APIC_ICR_BUSY)
517  kvm_apic_write_nodecode(vcpu, APIC_ICR);
518  else
519  kvm_apic_send_ipi(apic, icrl, icrh);
520  break;
521  case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
522  /*
523  * At this point, we expect that the AVIC HW has already
524  * set the appropriate IRR bits on the valid target
525  * vcpus. So, we just need to kick the appropriate vcpu.
526  */
527  avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh, index);
528  break;
529  case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
530  WARN_ONCE(1, "Invalid backing page\n");
531  break;
532  case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
533  /* Invalid IPI with vector < 16 */
534  break;
535  default:
536  vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
537  }
538 
539  return 1;
540 }
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source, u32 icrl, u32 icrh, u32 index)
Definition: avic.c:465
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
Definition: lapic.c:2446
void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
Definition: lapic.c:1504
struct kvm_vcpu * vcpu
Definition: lapic.h:64
struct vmcb * vmcb
Definition: svm.h:212
struct kvm_vcpu vcpu
Definition: svm.h:210
static __always_inline struct vcpu_svm * to_svm(struct kvm_vcpu *vcpu)
Definition: svm.h:364

◆ avic_init_vcpu()

int avic_init_vcpu ( struct vcpu_svm *  svm)

Definition at line 719 of file avic.c.

720 {
721  int ret;
722  struct kvm_vcpu *vcpu = &svm->vcpu;
723 
724  if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
725  return 0;
726 
727  ret = avic_init_backing_page(vcpu);
728  if (ret)
729  return ret;
730 
731  INIT_LIST_HEAD(&svm->ir_list);
732  spin_lock_init(&svm->ir_list_lock);
733  svm->dfr_reg = APIC_DFR_FLAT;
734 
735  return ret;
736 }
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
Definition: avic.c:277
u32 dfr_reg
Definition: svm.h:268
struct list_head ir_list
Definition: svm.h:278
spinlock_t ir_list_lock
Definition: svm.h:279
bool __read_mostly enable_apicv
Definition: x86.c:235

◆ avic_init_vmcb()

void avic_init_vmcb ( struct vcpu_svm *  svm,
struct vmcb *  vmcb 
)

Definition at line 244 of file avic.c.

245 {
246  struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
247  phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
248  phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
249  phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
250 
251  vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
252  vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
253  vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
254  vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;
255 
256  if (kvm_apicv_activated(svm->vcpu.kvm))
257  avic_activate_vmcb(svm);
258  else
259  avic_deactivate_vmcb(svm);
260 }
static void avic_activate_vmcb(struct vcpu_svm *svm)
Definition: avic.c:85
static void avic_deactivate_vmcb(struct vcpu_svm *svm)
Definition: avic.c:120
struct page * avic_logical_id_table_page
Definition: svm.h:100
struct page * avic_physical_id_table_page
Definition: svm.h:101
struct page * avic_backing_page
Definition: svm.h:269
static __always_inline struct kvm_svm * to_kvm_svm(struct kvm *kvm)
Definition: svm.h:316
bool kvm_apicv_activated(struct kvm *kvm)
Definition: x86.c:9935

◆ avic_pi_update_irte()

int avic_pi_update_irte ( struct kvm *  kvm,
unsigned int  host_irq,
uint32_t  guest_irq,
bool  set 
)

Here, we set up legacy mode in the following cases:

  1. When we cannot target the interrupt to a specific vcpu.
  2. Unsetting the posted interrupt.
  3. APIC virtualization is disabled for the vcpu.
  4. The IRQ has an incompatible delivery mode (SMI, INIT, etc.)

Here, we have successfully set up vcpu affinity in IOMMU guest mode. Now we need to store the posted-interrupt information in a per-vcpu ir_list so that we can reference it directly when we update the vcpu scheduling information in the IOMMU irte.

Here, pi is used to:

  • Tell IOMMU to use legacy mode for this interrupt.
  • Retrieve ga_tag of prior interrupt remapping data.

Check whether the posted interrupt was previously set up in guest_mode by checking whether the ga_tag was cached. If so, we need to clean up the per-vcpu ir_list.

Definition at line 894 of file avic.c.

896 {
897  struct kvm_kernel_irq_routing_entry *e;
898  struct kvm_irq_routing_table *irq_rt;
899  int idx, ret = 0;
900 
901  if (!kvm_arch_has_assigned_device(kvm) ||
902  !irq_remapping_cap(IRQ_POSTING_CAP))
903  return 0;
904 
905  pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
906  __func__, host_irq, guest_irq, set);
907 
908  idx = srcu_read_lock(&kvm->irq_srcu);
909  irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
910 
911  if (guest_irq >= irq_rt->nr_rt_entries ||
912  hlist_empty(&irq_rt->map[guest_irq])) {
913  pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
914  guest_irq, irq_rt->nr_rt_entries);
915  goto out;
916  }
917 
918  hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
919  struct vcpu_data vcpu_info;
920  struct vcpu_svm *svm = NULL;
921 
922  if (e->type != KVM_IRQ_ROUTING_MSI)
923  continue;
924 
925  /**
926  * Here, we setup with legacy mode in the following cases:
927  * 1. When cannot target interrupt to a specific vcpu.
928  * 2. Unsetting posted interrupt.
929  * 3. APIC virtualization is disabled for the vcpu.
930  * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
931  */
932  if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
933  kvm_vcpu_apicv_active(&svm->vcpu)) {
934  struct amd_iommu_pi_data pi;
935 
936  /* Try to enable guest_mode in IRTE */
937  pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
938  AVIC_HPA_MASK);
939  pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
940  svm->vcpu.vcpu_id);
941  pi.is_guest_mode = true;
942  pi.vcpu_data = &vcpu_info;
943  ret = irq_set_vcpu_affinity(host_irq, &pi);
944 
945  /**
946  * Here, we successfully setting up vcpu affinity in
947  * IOMMU guest mode. Now, we need to store the posted
948  * interrupt information in a per-vcpu ir_list so that
949  * we can reference to them directly when we update vcpu
950  * scheduling information in IOMMU irte.
951  */
952  if (!ret && pi.is_guest_mode)
953  svm_ir_list_add(svm, &pi);
954  } else {
955  /* Use legacy mode in IRTE */
956  struct amd_iommu_pi_data pi;
957 
958  /**
959  * Here, pi is used to:
960  * - Tell IOMMU to use legacy mode for this interrupt.
961  * - Retrieve ga_tag of prior interrupt remapping data.
962  */
963  pi.prev_ga_tag = 0;
964  pi.is_guest_mode = false;
965  ret = irq_set_vcpu_affinity(host_irq, &pi);
966 
967  /**
968  * Check if the posted interrupt was previously
969  * setup with the guest_mode by checking if the ga_tag
970  * was cached. If so, we need to clean up the per-vcpu
971  * ir_list.
972  */
973  if (!ret && pi.prev_ga_tag) {
974  int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
975  struct kvm_vcpu *vcpu;
976 
977  vcpu = kvm_get_vcpu_by_id(kvm, id);
978  if (vcpu)
979  svm_ir_list_del(to_svm(vcpu), &pi);
980  }
981  }
982 
983  if (!ret && svm) {
984  trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
985  e->gsi, vcpu_info.vector,
986  vcpu_info.pi_desc_addr, set);
987  }
988 
989  if (ret < 0) {
990  pr_err("%s: failed to update PI IRTE\n", __func__);
991  goto out;
992  }
993  }
994 
995  ret = 0;
996 out:
997  srcu_read_unlock(&kvm->irq_srcu, idx);
998  return ret;
999 }
static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
Definition: avic.c:792
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
Definition: avic.c:776
static int get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
Definition: avic.c:861
#define AVIC_GATAG(vm_id, vcpu_id)
Definition: avic.c:51
static bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
Definition: lapic.h:226
bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
Definition: x86.c:13419

◆ avic_refresh_apicv_exec_ctrl()

void avic_refresh_apicv_exec_ctrl ( struct kvm_vcpu *  vcpu)

Definition at line 1136 of file avic.c.

1137 {
1138  bool activated = kvm_vcpu_apicv_active(vcpu);
1139 
1140  if (!enable_apicv)
1141  return;
1142 
1143  avic_refresh_virtual_apic_mode(vcpu);
1144 
1145  if (activated)
1146  avic_vcpu_load(vcpu, vcpu->cpu);
1147  else
1148  avic_vcpu_put(vcpu);
1149 
1150  avic_set_pi_irte_mode(vcpu, activated);
1151 }
void avic_vcpu_put(struct kvm_vcpu *vcpu)
Definition: avic.c:1072
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
Definition: avic.c:1112
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
Definition: avic.c:744
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Definition: avic.c:1028

◆ avic_refresh_virtual_apic_mode()

void avic_refresh_virtual_apic_mode ( struct kvm_vcpu *  vcpu)

During temporary AVIC deactivation, the guest could update the APIC ID, DFR and LDR registers, which would not be trapped by avic_unaccelerated_access_interception(). In this case, we need to check and update the AVIC logical APIC ID table accordingly before re-activating.

Definition at line 1112 of file avic.c.

1113 {
1114  struct vcpu_svm *svm = to_svm(vcpu);
1115  struct vmcb *vmcb = svm->vmcb01.ptr;
1116 
1117  if (!lapic_in_kernel(vcpu) || !enable_apicv)
1118  return;
1119 
1120  if (kvm_vcpu_apicv_active(vcpu)) {
1121  /**
1122  * During AVIC temporary deactivation, guest could update
1123  * APIC ID, DFR and LDR registers, which would not be trapped
1124  * by avic_unaccelerated_access_interception(). In this case,
1125  * we need to check and update the AVIC logical APIC ID table
1126  * accordingly before re-activating.
1127  */
1128  avic_apicv_post_state_restore(vcpu);
1129  avic_activate_vmcb(svm);
1130  } else {
1131  avic_deactivate_vmcb(svm);
1132  }
1133  vmcb_mark_dirty(vmcb, VMCB_AVIC);
1134 }
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
Definition: avic.c:738
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
Definition: lapic.h:186
struct vmcb * ptr
Definition: svm.h:110
struct kvm_vmcb_info vmcb01
Definition: svm.h:213
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
Definition: svm.h:354

◆ avic_ring_doorbell()

void avic_ring_doorbell ( struct kvm_vcpu *  vcpu)

Definition at line 321 of file avic.c.

322 {
323  /*
324  * Note, the vCPU could get migrated to a different pCPU at any point,
325  * which could result in signalling the wrong/previous pCPU. But if
326  * that happens the vCPU is guaranteed to do a VMRUN (after being
327  * migrated) and thus will process pending interrupts, i.e. a doorbell
328  * is not needed (and the spurious one is harmless).
329  */
330  int cpu = READ_ONCE(vcpu->cpu);
331 
332  if (cpu != get_cpu()) {
333  wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
334  trace_kvm_avic_doorbell(vcpu->vcpu_id, kvm_cpu_get_apicid(cpu));
335  }
336  put_cpu();
337 }

◆ avic_unaccelerated_access_interception()

int avic_unaccelerated_access_interception ( struct kvm_vcpu *  vcpu)

Definition at line 693 of file avic.c.

694 {
695  struct vcpu_svm *svm = to_svm(vcpu);
696  int ret = 0;
697  u32 offset = svm->vmcb->control.exit_info_1 &
698  AVIC_UNACCEL_ACCESS_OFFSET_MASK;
699  u32 vector = svm->vmcb->control.exit_info_2 &
700  AVIC_UNACCEL_ACCESS_VECTOR_MASK;
701  bool write = (svm->vmcb->control.exit_info_1 >> 32) &
702  AVIC_UNACCEL_ACCESS_WRITE_MASK;
703  bool trap = is_avic_unaccelerated_access_trap(offset);
704 
705  trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
706  trap, write, vector);
707  if (trap) {
708  /* Handling Trap */
709  WARN_ONCE(!write, "svm: Handling trap read.\n");
710  ret = avic_unaccel_trap_write(vcpu);
711  } else {
712  /* Handling Fault */
713  ret = kvm_emulate_instruction(vcpu, 0);
714  }
715 
716  return ret;
717 }
static bool is_avic_unaccelerated_access_trap(u32 offset)
Definition: avic.c:664
static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
Definition: avic.c:641
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
Definition: x86.c:9262

◆ avic_vcpu_blocking()

void avic_vcpu_blocking ( struct kvm_vcpu *  vcpu)

Definition at line 1153 of file avic.c.

1154 {
1155  if (!kvm_vcpu_apicv_active(vcpu))
1156  return;
1157 
1158  /*
1159  * Unload the AVIC when the vCPU is about to block, _before_
1160  * the vCPU actually blocks.
1161  *
1162  * Any IRQs that arrive before IsRunning=0 will not cause an
1163  * incomplete IPI vmexit on the source, therefore vIRR will also
1164  * be checked by kvm_vcpu_check_block() before blocking. The
1165  * memory barrier implicit in set_current_state orders writing
1166  * IsRunning=0 before reading the vIRR. The processor needs a
1167  * matching memory barrier on interrupt delivery between writing
1168  * IRR and reading IsRunning; the lack of this barrier might be
1169  * the cause of errata #1235).
1170  */
1171  avic_vcpu_put(vcpu);
1172 }

◆ avic_vcpu_get_apicv_inhibit_reasons()

unsigned long avic_vcpu_get_apicv_inhibit_reasons ( struct kvm_vcpu *  vcpu)

Definition at line 542 of file avic.c.

543 {
544  if (is_guest_mode(vcpu))
545  return APICV_INHIBIT_REASON_NESTED;
546  return 0;
547 }
static bool is_guest_mode(struct kvm_vcpu *vcpu)

◆ avic_vcpu_load()

void avic_vcpu_load ( struct kvm_vcpu *  vcpu,
int  cpu 
)

Definition at line 1028 of file avic.c.

1029 {
1030  u64 entry;
1031  int h_physical_id = kvm_cpu_get_apicid(cpu);
1032  struct vcpu_svm *svm = to_svm(vcpu);
1033  unsigned long flags;
1034 
1035  lockdep_assert_preemption_disabled();
1036 
1037  if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
1038  return;
1039 
1040  /*
1041  * No need to update anything if the vCPU is blocking, i.e. if the vCPU
1042  * is being scheduled in after being preempted. The CPU entries in the
1043  * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
1044  * If the vCPU was migrated, its new CPU value will be stuffed when the
1045  * vCPU unblocks.
1046  */
1047  if (kvm_vcpu_is_blocking(vcpu))
1048  return;
1049 
1050  /*
1051  * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
1052  * _currently_ have assigned devices, as that can change. Holding
1053  * ir_list_lock ensures that either svm_ir_list_add() will consume
1054  * up-to-date entry information, or that this task will wait until
1055  * svm_ir_list_add() completes to set the new target pCPU.
1056  */
1057  spin_lock_irqsave(&svm->ir_list_lock, flags);
1058 
1059  entry = READ_ONCE(*(svm->avic_physical_id_cache));
1060  WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1061 
1062  entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1063  entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1064  entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1065 
1066  WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1067  avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
1068 
1069  spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1070 }
static int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
Definition: avic.c:1002
u64 * avic_physical_id_cache
Definition: svm.h:270

◆ avic_vcpu_put()

void avic_vcpu_put ( struct kvm_vcpu *  vcpu)

Definition at line 1072 of file avic.c.

1073 {
1074  u64 entry;
1075  struct vcpu_svm *svm = to_svm(vcpu);
1076  unsigned long flags;
1077 
1078  lockdep_assert_preemption_disabled();
1079 
1080  /*
1081  * Note, reading the Physical ID entry outside of ir_list_lock is safe
1082  * as only the pCPU that has loaded (or is loading) the vCPU is allowed
1083  * to modify the entry, and preemption is disabled. I.e. the vCPU
1084  * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
1085  * recursively.
1086  */
1087  entry = READ_ONCE(*(svm->avic_physical_id_cache));
1088 
1089  /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
1090  if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
1091  return;
1092 
1093  /*
1094  * Take and hold the per-vCPU interrupt remapping lock while updating
1095  * the Physical ID entry even though the lock doesn't protect against
1096  * multiple writers (see above). Holding ir_list_lock ensures that
1097  * either svm_ir_list_add() will consume up-to-date entry information,
1098  * or that this task will wait until svm_ir_list_add() completes to
1099  * mark the vCPU as not running.
1100  */
1101  spin_lock_irqsave(&svm->ir_list_lock, flags);
1102 
1103  avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1104 
1105  entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1106  WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
1107 
1108  spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1109 
1110 }

◆ avic_vcpu_unblocking()

void avic_vcpu_unblocking ( struct kvm_vcpu *  vcpu)

Definition at line 1174 of file avic.c.

1175 {
1176  if (!kvm_vcpu_apicv_active(vcpu))
1177  return;
1178 
1179  avic_vcpu_load(vcpu, vcpu->cpu);
1180 }

◆ avic_vm_destroy()

void avic_vm_destroy ( struct kvm *  kvm)

Definition at line 174 of file avic.c.

175 {
176  unsigned long flags;
177  struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
178 
179  if (!enable_apicv)
180  return;
181 
182  if (kvm_svm->avic_logical_id_table_page)
183  __free_page(kvm_svm->avic_logical_id_table_page);
184  if (kvm_svm->avic_physical_id_table_page)
185  __free_page(kvm_svm->avic_physical_id_table_page);
186 
187  spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
188  hash_del(&kvm_svm->hnode);
189  spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
190 }
struct hlist_node hnode
Definition: svm.h:102

◆ avic_vm_init()

int avic_vm_init ( struct kvm *  kvm)

Definition at line 192 of file avic.c.

193 {
194  unsigned long flags;
195  int err = -ENOMEM;
196  struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
197  struct kvm_svm *k2;
198  struct page *p_page;
199  struct page *l_page;
200  u32 vm_id;
201 
202  if (!enable_apicv)
203  return 0;
204 
205  /* Allocating physical APIC ID table (4KB) */
206  p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
207  if (!p_page)
208  goto free_avic;
209 
210  kvm_svm->avic_physical_id_table_page = p_page;
211 
212  /* Allocating logical APIC ID table (4KB) */
213  l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
214  if (!l_page)
215  goto free_avic;
216 
217  kvm_svm->avic_logical_id_table_page = l_page;
218 
219  spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
220  again:
221  vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
222  if (vm_id == 0) { /* id is 1-based, zero is not okay */
223  next_vm_id_wrapped = 1;
224  goto again;
225  }
226  /* Is it still in use? Only possible if wrapped at least once */
227  if (next_vm_id_wrapped) {
228  hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
229  if (k2->avic_vm_id == vm_id)
230  goto again;
231  }
232  }
233  kvm_svm->avic_vm_id = vm_id;
234  hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
235  spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
236 
237  return 0;
238 
239 free_avic:
240  avic_vm_destroy(kvm);
241  return err;
242 }
static bool next_vm_id_wrapped
Definition: avic.c:73
void avic_vm_destroy(struct kvm *kvm)
Definition: avic.c:174
#define AVIC_VM_ID_MASK
Definition: avic.c:44
static u32 next_vm_id
Definition: avic.c:72

◆ clr_exception_intercept()

static void clr_exception_intercept ( struct vcpu_svm *  svm,
u32  bit 
)
inline static

Definition at line 413 of file svm.h.

414 {
415  struct vmcb *vmcb = svm->vmcb01.ptr;
416 
417  WARN_ON_ONCE(bit >= 32);
418  vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
419 
420  recalc_intercepts(svm);
421 }
static void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
Definition: svm.h:385
void recalc_intercepts(struct vcpu_svm *svm)
Definition: nested.c:122

◆ DECLARE_PER_CPU()

DECLARE_PER_CPU ( struct svm_cpu_data  ,
svm_data   
)

◆ disable_gif()

static void disable_gif ( struct vcpu_svm *  svm)
inline static

Definition at line 473 of file svm.h.

474 {
475  struct vmcb *vmcb = get_vgif_vmcb(svm);
476 
477  if (vmcb)
478  vmcb->control.int_ctl &= ~V_GIF_MASK;
479  else
480  svm->guest_gif = false;
481 }
bool guest_gif
Definition: svm.h:294
static struct vmcb * get_vgif_vmcb(struct vcpu_svm *svm)
Definition: svm.h:452

◆ disable_nmi_singlestep()

void disable_nmi_singlestep ( struct vcpu_svm *  svm)

Definition at line 1054 of file svm.c.

1055 {
1056  svm->nmi_singlestep = false;
1057 
1058  if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1059  /* Clear our flags if they were not set by the guest */
1060  if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1061  svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1062  if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1063  svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1064  }
1065 }
bool nmi_singlestep
Definition: svm.h:257
u64 nmi_singlestep_guest_rflags
Definition: svm.h:258

◆ enable_gif()

static void enable_gif ( struct vcpu_svm *  svm)
inline static

Definition at line 463 of file svm.h.

464 {
465  struct vmcb *vmcb = get_vgif_vmcb(svm);
466 
467  if (vmcb)
468  vmcb->control.int_ctl |= V_GIF_MASK;
469  else
470  svm->guest_gif = true;
471 }

◆ enter_svm_guest_mode()

int enter_svm_guest_mode ( struct kvm_vcpu *  vcpu,
u64  vmcb_gpa,
struct vmcb *  vmcb12,
bool  from_vmrun 
)

Definition at line 785 of file nested.c.

787 {
788  struct vcpu_svm *svm = to_svm(vcpu);
789  int ret;
790 
791  trace_kvm_nested_vmenter(svm->vmcb->save.rip,
792  vmcb12_gpa,
793  vmcb12->save.rip,
794  vmcb12->control.int_ctl,
795  vmcb12->control.event_inj,
796  vmcb12->control.nested_ctl,
797  vmcb12->control.nested_cr3,
798  vmcb12->save.cr3,
799  KVM_ISA_SVM);
800 
801  trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
802  vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
803  vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
804  vmcb12->control.intercepts[INTERCEPT_WORD3],
805  vmcb12->control.intercepts[INTERCEPT_WORD4],
806  vmcb12->control.intercepts[INTERCEPT_WORD5]);
807 
808 
809  svm->nested.vmcb12_gpa = vmcb12_gpa;
810 
811  WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
812 
813  nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
814 
815  svm_switch_vmcb(svm, &svm->nested.vmcb02);
816  nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
817  nested_vmcb02_prepare_save(svm, vmcb12);
818 
819  ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
820  nested_npt_enabled(svm), from_vmrun);
821  if (ret)
822  return ret;
823 
824  if (!from_vmrun)
825  kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
826 
827  svm_set_gif(svm, true);
828 
830  kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
831 
833 
834  return 0;
835 }
u64 vmcb12_gpa
Definition: svm.h:162
struct vmcb_save_area_cached save
Definition: svm.h:179
struct kvm_vmcb_info vmcb02
Definition: svm.h:159
struct svm_nested_state nested
Definition: svm.h:238
static void nested_svm_hv_update_vm_vp_ids(struct kvm_vcpu *vcpu)
Definition: hyperv.h:46
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt, bool reload_pdptrs)
Definition: nested.c:507
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, unsigned long vmcb12_rip, unsigned long vmcb12_csbase)
Definition: nested.c:632
static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Definition: nested.c:773
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
Definition: nested.c:537
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
Definition: svm.c:1407
void svm_set_gif(struct vcpu_svm *svm, bool value)
Definition: svm.c:2406
static bool nested_npt_enabled(struct vcpu_svm *svm)
Definition: svm.h:493
#define KVM_ISA_SVM
Definition: trace.h:284

◆ get_vgif_vmcb()

static struct vmcb * get_vgif_vmcb ( struct vcpu_svm *  svm)
inline static

Definition at line 452 of file svm.h.

453 {
454  if (!vgif)
455  return NULL;
456 
457  if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
458  return svm->nested.vmcb02.ptr;
459  else
460  return svm->vmcb01.ptr;
461 }
int vgif
Definition: svm.c:214
static bool nested_vgif_enabled(struct vcpu_svm *svm)
Definition: svm.h:446

◆ get_vnmi_vmcb_l1()

static struct vmcb * get_vnmi_vmcb_l1 ( struct vcpu_svm *  svm)
inline static

Definition at line 513 of file svm.h.

514 {
515  if (!vnmi)
516  return NULL;
517 
518  if (is_guest_mode(&svm->vcpu))
519  return NULL;
520  else
521  return svm->vmcb01.ptr;
522 }
bool vnmi
Definition: svm.c:238

◆ gif_set()

static bool gif_set ( struct vcpu_svm *  svm)
inline static

Definition at line 483 of file svm.h.

484 {
485  struct vmcb *vmcb = get_vgif_vmcb(svm);
486 
487  if (vmcb)
488  return !!(vmcb->control.int_ctl & V_GIF_MASK);
489  else
490  return svm->guest_gif;
491 }

◆ is_vnmi_enabled()

static bool is_vnmi_enabled ( struct vcpu_svm *  svm)
inline static

Definition at line 524 of file svm.h.

525 {
526  struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);
527 
528  if (vmcb)
529  return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
530  else
531  return false;
532 }
static struct vmcb * get_vnmi_vmcb_l1(struct vcpu_svm *svm)
Definition: svm.h:513

◆ is_x2apic_msrpm_offset()

static bool is_x2apic_msrpm_offset ( u32  offset)
inline static

Definition at line 504 of file svm.h.

505 {
506  /* 4 msrs per u8, and 4 u8 in u32 */
507  u32 msr = offset * 16;
508 
509  return (msr >= APIC_BASE_MSR) &&
510  (msr < (APIC_BASE_MSR + 0x100));
511 }
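Because each u32 of the MSRPM covers 16 MSRs, offset * 16 recovers the first MSR number served by the u32 at that offset. Assuming APIC_BASE_MSR is 0x800 (the x2APIC MSR base), the worked values below illustrate which offsets the predicate accepts:

    /*
     * Worked example (assumes APIC_BASE_MSR == 0x800, i.e. x2APIC MSRs 0x800-0x8ff):
     *   offset 0x80 -> msr 0x800 (first x2APIC register)     -> true
     *   offset 0x8f -> msr 0x8f0 (still below 0x800 + 0x100) -> true
     *   offset 0x90 -> msr 0x900 (at APIC_BASE_MSR + 0x100)  -> false
     */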

◆ nested_copy_vmcb_control_to_cache()

void nested_copy_vmcb_control_to_cache ( struct vcpu_svm *  svm,
struct vmcb_control_area *  control 
)

Definition at line 382 of file nested.c.

384 {
385  __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
386 }
u64 control
Definition: posted_intr.h:16
struct vmcb_ctrl_area_cached ctl
Definition: svm.h:173
static void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu, struct vmcb_ctrl_area_cached *to, struct vmcb_control_area *from)
Definition: nested.c:336

◆ nested_copy_vmcb_save_to_cache()

void nested_copy_vmcb_save_to_cache ( struct vcpu_svm *  svm,
struct vmcb_save_area *  save 
)

Definition at line 404 of file nested.c.

406 {
407  __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
408 }
static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to, struct vmcb_save_area *from)
Definition: nested.c:388

◆ nested_exit_on_intr()

static bool nested_exit_on_intr ( struct vcpu_svm *  svm)
inline static

Definition at line 581 of file svm.h.

582 {
583  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
584 }
static bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
Definition: svm.h:397

◆ nested_exit_on_nmi()

static bool nested_exit_on_nmi ( struct vcpu_svm *  svm)
inline static

Definition at line 586 of file svm.h.

587 {
588  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
589 }

◆ nested_exit_on_smi()

static bool nested_exit_on_smi ( struct vcpu_svm *  svm)
inline static

Definition at line 576 of file svm.h.

577 {
578  return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
579 }

◆ nested_npt_enabled()

static bool nested_npt_enabled ( struct vcpu_svm *  svm)
inline static

Definition at line 493 of file svm.h.

494 {
495  return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
496 }

◆ nested_svm_check_exception()

int nested_svm_check_exception ( struct vcpu_svm *  svm,
unsigned  nr,
bool  has_error_code,
u32  error_code 
)

◆ nested_svm_check_permissions()

int nested_svm_check_permissions ( struct kvm_vcpu *  vcpu)

Definition at line 1357 of file nested.c.

1358 {
1359  if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1360  kvm_queue_exception(vcpu, UD_VECTOR);
1361  return 1;
1362  }
1363 
1364  if (to_svm(vcpu)->vmcb->save.cpl) {
1365  kvm_inject_gp(vcpu, 0);
1366  return 1;
1367  }
1368 
1369  return 0;
1370 }
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
Definition: x86.c:731
static bool is_paging(struct kvm_vcpu *vcpu)
Definition: x86.h:198

◆ nested_svm_exit_handled()

int nested_svm_exit_handled ( struct vcpu_svm *  svm)

Definition at line 1345 of file nested.c.

1346 {
1347  int vmexit;
1348 
1349  vmexit = nested_svm_intercept(svm);
1350 
1351  if (vmexit == NESTED_EXIT_DONE)
1352  nested_svm_vmexit(svm);
1353 
1354  return vmexit;
1355 }
int nested_svm_vmexit(struct vcpu_svm *svm)
Definition: nested.c:967
static int nested_svm_intercept(struct vcpu_svm *svm)
Definition: nested.c:1301
#define NESTED_EXIT_DONE
Definition: svm.h:566

◆ nested_svm_exit_special()

int nested_svm_exit_special ( struct vcpu_svm *  svm)

Definition at line 1496 of file nested.c.

1497 {
1498  u32 exit_code = svm->vmcb->control.exit_code;
1499  struct kvm_vcpu *vcpu = &svm->vcpu;
1500 
1501  switch (exit_code) {
1502  case SVM_EXIT_INTR:
1503  case SVM_EXIT_NMI:
1504  case SVM_EXIT_NPF:
1505  return NESTED_EXIT_HOST;
1506  case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1507  u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1508 
1509  if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1510  excp_bits)
1511  return NESTED_EXIT_HOST;
1512  else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1513  svm->vcpu.arch.apf.host_apf_flags)
1514  /* Trap async PF even if not shadowing */
1515  return NESTED_EXIT_HOST;
1516  break;
1517  }
1518  case SVM_EXIT_VMMCALL:
1519  /* Hyper-V L2 TLB flush hypercall is handled by L0 */
1520  if (guest_hv_cpuid_has_l2_tlb_flush(vcpu) &&
1523  return NESTED_EXIT_HOST;
1524  break;
1525  default:
1526  break;
1527  }
1528 
1529  return NESTED_EXIT_CONTINUE;
1530 }
static bool kvm_hv_is_tlb_flush_hcall(struct kvm_vcpu *vcpu)
Definition: hyperv.h:308
static bool guest_hv_cpuid_has_l2_tlb_flush(struct kvm_vcpu *vcpu)
Definition: hyperv.h:312
static bool nested_svm_l2_tlb_flush_enabled(struct kvm_vcpu *vcpu)
Definition: hyperv.h:47
#define NESTED_EXIT_HOST
Definition: svm.h:565
#define NESTED_EXIT_CONTINUE
Definition: svm.h:567

◆ nested_svm_simple_vmexit()

static int nested_svm_simple_vmexit ( struct vcpu_svm *  svm,
u32  exit_code 
)
inline static

Definition at line 602 of file svm.h.

603 {
604  svm->vmcb->control.exit_code = exit_code;
605  svm->vmcb->control.exit_info_1 = 0;
606  svm->vmcb->control.exit_info_2 = 0;
607  return nested_svm_vmexit(svm);
608 }
int nested_svm_vmexit(struct vcpu_svm *svm)
Definition: nested.c:967
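A hedged usage sketch: a caller that only needs to reflect an intercepted event to L1, with no exit qualification, can synthesize the nested #VMEXIT from the exit code alone. SVM_EXIT_VMMCALL is used purely as an illustrative exit code here:

    /* Illustrative only: forward an event to L1 with just an exit code;
     * exit_info_1 and exit_info_2 are zeroed by the helper.
     */
    int ret = nested_svm_simple_vmexit(svm, SVM_EXIT_VMMCALL);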

◆ nested_svm_update_tsc_ratio_msr()

void nested_svm_update_tsc_ratio_msr ( struct kvm_vcpu *  vcpu)

Definition at line 1532 of file nested.c.

1533 {
1534  struct vcpu_svm *svm = to_svm(vcpu);
1535 
1536  vcpu->arch.tsc_scaling_ratio =
1537  kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
1538  svm->tsc_ratio_msr);
1539  svm_write_tsc_multiplier(vcpu);
1540 }
u64 tsc_ratio_msr
Definition: svm.h:226
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
Definition: svm.c:1163
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
Definition: x86.c:2605

◆ nested_svm_virtualize_tpr()

static bool nested_svm_virtualize_tpr ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 569 of file svm.h.

570 {
571  struct vcpu_svm *svm = to_svm(vcpu);
572 
573  return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
574 }

◆ nested_svm_vmexit()

int nested_svm_vmexit ( struct vcpu_svm *  svm)

Definition at line 967 of file nested.c.

968 {
969  struct kvm_vcpu *vcpu = &svm->vcpu;
970  struct vmcb *vmcb01 = svm->vmcb01.ptr;
971  struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
972  struct vmcb *vmcb12;
973  struct kvm_host_map map;
974  int rc;
975 
976  rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
977  if (rc) {
978  if (rc == -EINVAL)
979  kvm_inject_gp(vcpu, 0);
980  return 1;
981  }
982 
983  vmcb12 = map.hva;
984 
985  /* Exit Guest-Mode */
986  leave_guest_mode(vcpu);
987  svm->nested.vmcb12_gpa = 0;
988  WARN_ON_ONCE(svm->nested.nested_run_pending);
989 
990  kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
991 
992  /* in case we halted in L2 */
993  svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
994 
995  /* Give the current vmcb to the guest */
996 
997  vmcb12->save.es = vmcb02->save.es;
998  vmcb12->save.cs = vmcb02->save.cs;
999  vmcb12->save.ss = vmcb02->save.ss;
1000  vmcb12->save.ds = vmcb02->save.ds;
1001  vmcb12->save.gdtr = vmcb02->save.gdtr;
1002  vmcb12->save.idtr = vmcb02->save.idtr;
1003  vmcb12->save.efer = svm->vcpu.arch.efer;
1004  vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1005  vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1006  vmcb12->save.cr2 = vmcb02->save.cr2;
1007  vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1008  vmcb12->save.rflags = kvm_get_rflags(vcpu);
1009  vmcb12->save.rip = kvm_rip_read(vcpu);
1010  vmcb12->save.rsp = kvm_rsp_read(vcpu);
1011  vmcb12->save.rax = kvm_rax_read(vcpu);
1012  vmcb12->save.dr7 = vmcb02->save.dr7;
1013  vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1014  vmcb12->save.cpl = vmcb02->save.cpl;
1015 
1016  vmcb12->control.int_state = vmcb02->control.int_state;
1017  vmcb12->control.exit_code = vmcb02->control.exit_code;
1018  vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
1019  vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
1020  vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
1021 
1022  if (vmcb12->control.exit_code != SVM_EXIT_ERR)
1024 
1025  if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
1026  vmcb12->control.next_rip = vmcb02->control.next_rip;
1027 
1028  vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
1029  vmcb12->control.event_inj = svm->nested.ctl.event_inj;
1030  vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
1031 
1032  if (!kvm_pause_in_guest(vcpu->kvm)) {
1033  vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1035 
1036  }
1037 
1039 
1040  svm_switch_vmcb(svm, &svm->vmcb01);
1041 
1042  /*
1043  * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
1044  *
1045  * V_IRQ, V_IRQ_VECTOR, V_INTR_PRIO_MASK, V_IGN_TPR: If L1 doesn't
1046  * intercept interrupts, then KVM will use vmcb02's V_IRQ (and related
1047  * flags) to detect interrupt windows for L1 IRQs (even if L1 uses
1048  * virtual interrupt masking). Raise KVM_REQ_EVENT to ensure that
1049  * KVM re-requests an interrupt window if necessary, which implicitly
1050  * copies this bits from vmcb02 to vmcb01.
1051  *
1052  * V_TPR: If L1 doesn't use virtual interrupt masking, then L1's vTPR
1053  * is stored in vmcb02, but its value doesn't need to be copied from/to
1054  * vmcb01 because it is copied from/to the virtual APIC's TPR register
1055  * on each VM entry/exit.
1056  *
1057  * V_GIF: If nested vGIF is not used, KVM uses vmcb02's V_GIF for L1's
1058  * V_GIF. However, GIF is architecturally clear on each VM exit, thus
1059  * there is no need to copy V_GIF from vmcb02 to vmcb01.
1060  */
1061  if (!nested_exit_on_intr(svm))
1062  kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1063 
1064  if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1065  (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1066  svm_copy_lbrs(vmcb12, vmcb02);
1067  svm_update_lbrv(vcpu);
1068  } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1069  svm_copy_lbrs(vmcb01, vmcb02);
1070  svm_update_lbrv(vcpu);
1071  }
1072 
1073  if (vnmi) {
1074  if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)
1075  vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1076  else
1077  vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1078 
1079  if (vcpu->arch.nmi_pending) {
1080  vcpu->arch.nmi_pending--;
1081  vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1082  } else {
1083  vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
1084  }
1085  }
1086 
1087  /*
1088  * On vmexit the GIF is set to false and
1089  * no event can be injected in L1.
1090  */
1091  svm_set_gif(svm, false);
1092  vmcb01->control.exit_int_info = 0;
1093 
1094  svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
1095  if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1096  vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1098  }
1099 
1100  if (kvm_caps.has_tsc_control &&
1101  vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
1102  vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
1104  }
1105 
1106  svm->nested.ctl.nested_cr3 = 0;
1107 
1108  /*
1109  * Restore processor state that had been saved in vmcb01
1110  */
1111  kvm_set_rflags(vcpu, vmcb01->save.rflags);
1112  svm_set_efer(vcpu, vmcb01->save.efer);
1113  svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1114  svm_set_cr4(vcpu, vmcb01->save.cr4);
1115  kvm_rax_write(vcpu, vmcb01->save.rax);
1116  kvm_rsp_write(vcpu, vmcb01->save.rsp);
1117  kvm_rip_write(vcpu, vmcb01->save.rip);
1118 
1119  svm->vcpu.arch.dr7 = DR7_FIXED_1;
1120  kvm_update_dr7(&svm->vcpu);
1121 
1122  trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1123  vmcb12->control.exit_info_1,
1124  vmcb12->control.exit_info_2,
1125  vmcb12->control.exit_int_info,
1126  vmcb12->control.exit_int_info_err,
1127  KVM_ISA_SVM);
1128 
1129  kvm_vcpu_unmap(vcpu, &map, true);
1130 
 1131  nested_svm_transition_tlb_flush(vcpu);
 1132 
 1133  nested_svm_uninit_mmu_context(vcpu);
 1134 
1135  rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1136  if (rc)
1137  return 1;
1138 
1139  /*
1140  * Drop what we picked up for L2 via svm_complete_interrupts() so it
1141  * doesn't end up in L1.
1142  */
1143  svm->vcpu.arch.nmi_injected = false;
 1144  kvm_clear_exception_queue(vcpu);
 1145  kvm_clear_interrupt_queue(vcpu);
 1146 
1147  /*
1148  * If we are here following the completion of a VMRUN that
1149  * is being single-stepped, queue the pending #DB intercept
 1150  * right now so that it can be accounted for before we execute
1151  * L1's next instruction.
1152  */
1153  if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1154  kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1155 
1156  /*
1157  * Un-inhibit the AVIC right away, so that other vCPUs can start
1158  * to benefit from it right away.
1159  */
1160  if (kvm_apicv_activated(vcpu->kvm))
 1161  __kvm_vcpu_update_apicv(vcpu);
 1162 
1163  return 0;
1164 }
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:278
static unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
static ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
static void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
static ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
static void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
static unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
static void leave_guest_mode(struct kvm_vcpu *vcpu)
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
Definition: kvm_main.c:3152
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
Definition: kvm_main.c:3186
Definition: x86.h:12
bool has_tsc_control
Definition: x86.h:14
bool nested_run_pending
Definition: svm.h:170
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
Definition: nested.c:102
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, struct vmcb *vmcb12)
Definition: nested.c:448
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
Definition: nested.c:481
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
Definition: svm.c:982
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
Definition: svm.c:1852
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
Definition: svm.c:296
bool vnmi
Definition: svm.c:238
void svm_update_lbrv(struct kvm_vcpu *vcpu)
Definition: svm.c:1037
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Definition: svm.c:1914
static bool nested_exit_on_intr(struct vcpu_svm *svm)
Definition: svm.h:581
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
Definition: x86.c:13189
void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
Definition: x86.c:10525
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
Definition: x86.c:13170
void kvm_update_dr7(struct kvm_vcpu *vcpu)
Definition: x86.c:1346
static void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
Definition: x86.h:122
static bool kvm_pause_in_guest(struct kvm *kvm)
Definition: x86.h:419
static void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
Definition: x86.h:107
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_svm_vmrun()

int nested_svm_vmrun ( struct kvm_vcpu *  vcpu)

Definition at line 837 of file nested.c.

838 {
839  struct vcpu_svm *svm = to_svm(vcpu);
840  int ret;
841  struct vmcb *vmcb12;
842  struct kvm_host_map map;
843  u64 vmcb12_gpa;
844  struct vmcb *vmcb01 = svm->vmcb01.ptr;
845 
846  if (!svm->nested.hsave_msr) {
847  kvm_inject_gp(vcpu, 0);
848  return 1;
849  }
850 
851  if (is_smm(vcpu)) {
852  kvm_queue_exception(vcpu, UD_VECTOR);
853  return 1;
854  }
855 
856  /* This fails when VP assist page is enabled but the supplied GPA is bogus */
857  ret = kvm_hv_verify_vp_assist(vcpu);
858  if (ret) {
859  kvm_inject_gp(vcpu, 0);
860  return ret;
861  }
862 
863  vmcb12_gpa = svm->vmcb->save.rax;
864  ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
865  if (ret == -EINVAL) {
866  kvm_inject_gp(vcpu, 0);
867  return 1;
868  } else if (ret) {
869  return kvm_skip_emulated_instruction(vcpu);
870  }
871 
872  ret = kvm_skip_emulated_instruction(vcpu);
873 
874  vmcb12 = map.hva;
875 
876  if (WARN_ON_ONCE(!svm->nested.initialized))
877  return -EINVAL;
878 
879  nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
880  nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
881 
882  if (!nested_vmcb_check_save(vcpu) ||
 883  !nested_vmcb_check_controls(vcpu)) {
 884  vmcb12->control.exit_code = SVM_EXIT_ERR;
885  vmcb12->control.exit_code_hi = 0;
886  vmcb12->control.exit_info_1 = 0;
887  vmcb12->control.exit_info_2 = 0;
888  goto out;
889  }
890 
891  /*
892  * Since vmcb01 is not in use, we can use it to store some of the L1
893  * state.
894  */
895  vmcb01->save.efer = vcpu->arch.efer;
896  vmcb01->save.cr0 = kvm_read_cr0(vcpu);
897  vmcb01->save.cr4 = vcpu->arch.cr4;
898  vmcb01->save.rflags = kvm_get_rflags(vcpu);
899  vmcb01->save.rip = kvm_rip_read(vcpu);
900 
901  if (!npt_enabled)
902  vmcb01->save.cr3 = kvm_read_cr3(vcpu);
903 
904  svm->nested.nested_run_pending = 1;
905 
906  if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
907  goto out_exit_err;
908 
909  if (nested_svm_vmrun_msrpm(svm))
910  goto out;
911 
912 out_exit_err:
913  svm->nested.nested_run_pending = 0;
914  svm->nmi_l1_to_l2 = false;
915  svm->soft_int_injected = false;
916 
917  svm->vmcb->control.exit_code = SVM_EXIT_ERR;
918  svm->vmcb->control.exit_code_hi = 0;
919  svm->vmcb->control.exit_info_1 = 0;
920  svm->vmcb->control.exit_info_2 = 0;
921 
922  nested_svm_vmexit(svm);
923 
924 out:
925  kvm_vcpu_unmap(vcpu, &map, true);
926 
927  return ret;
928 }
static int kvm_hv_verify_vp_assist(struct kvm_vcpu *vcpu)
Definition: hyperv.h:316
static bool is_smm(struct kvm_vcpu *vcpu)
Definition: smm.h:160
bool initialized
Definition: svm.h:181
u64 hsave_msr
Definition: svm.h:160
bool nmi_l1_to_l2
Definition: svm.h:260
bool soft_int_injected
Definition: svm.h:265
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Definition: nested.c:188
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, struct vmcb *vmcb12, bool from_vmrun)
Definition: nested.c:785
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control)
Definition: nested.c:382
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, struct vmcb_save_area *save)
Definition: nested.c:404
static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
Definition: nested.c:319
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
Definition: nested.c:327
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
Definition: x86.c:8916
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_sync_control_from_vmcb02()

void nested_sync_control_from_vmcb02 ( struct vcpu_svm svm)

Definition at line 414 of file nested.c.

415 {
416  u32 mask;
417  svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
418  svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
419 
420  /* Only a few fields of int_ctl are written by the processor. */
421  mask = V_IRQ_MASK | V_TPR_MASK;
422  /*
423  * Don't sync vmcb02 V_IRQ back to vmcb12 if KVM (L0) is intercepting
424  * virtual interrupts in order to request an interrupt window, as KVM
425  * has usurped vmcb02's int_ctl. If an interrupt window opens before
426  * the next VM-Exit, svm_clear_vintr() will restore vmcb12's int_ctl.
427  * If no window opens, V_IRQ will be correctly preserved in vmcb12's
428  * int_ctl (because it was never recognized while L2 was running).
429  */
430  if (svm_is_intercept(svm, INTERCEPT_VINTR) &&
431  !test_bit(INTERCEPT_VINTR, (unsigned long *)svm->nested.ctl.intercepts))
432  mask &= ~V_IRQ_MASK;
433 
434  if (nested_vgif_enabled(svm))
435  mask |= V_GIF_MASK;
436 
437  if (nested_vnmi_enabled(svm))
438  mask |= V_NMI_BLOCKING_MASK | V_NMI_PENDING_MASK;
439 
440  svm->nested.ctl.int_ctl &= ~mask;
441  svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
442 }
u32 intercepts[MAX_INTERCEPT]
Definition: svm.h:126
static bool svm_is_intercept(struct vcpu_svm *svm, int bit)
Definition: svm.h:441
static bool nested_vnmi_enabled(struct vcpu_svm *svm)
Definition: svm.h:498
Here is the call graph for this function:
Here is the caller graph for this function:
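
The heart of the function is a masked read-modify-write: only the bits the processor is allowed to update are copied from vmcb02's int_ctl into the cached vmcb12 view. Below is a minimal, self-contained sketch of that pattern; the helper name and the simplified masks are illustrative assumptions, not KVM symbols.

    #include <stdint.h>

    /* Illustrative stand-ins for the int_ctl bit masks (values assumed). */
    #define EX_V_IRQ_MASK (1u << 8)
    #define EX_V_TPR_MASK 0x0fu

    /*
     * Return @cached with only the bits in @mask refreshed from @hw:
     * everything outside @mask keeps its cached value.
     */
    static inline uint32_t sync_masked_bits(uint32_t cached, uint32_t hw, uint32_t mask)
    {
            cached &= ~mask;      /* drop the stale processor-written bits      */
            cached |= hw & mask;  /* adopt the values hardware wrote to vmcb02  */
            return cached;
    }

With mask = EX_V_IRQ_MASK | EX_V_TPR_MASK this reproduces the int_ctl update at lines 440-441 of the listing above.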

◆ nested_vgif_enabled()

static bool nested_vgif_enabled ( struct vcpu_svm svm)
inlinestatic

Definition at line 446 of file svm.h.

447 {
448  return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
449  (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
450 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ nested_vmcb02_compute_g_pat()

void nested_vmcb02_compute_g_pat ( struct vcpu_svm svm)

Definition at line 528 of file nested.c.

529 {
530  if (!svm->nested.vmcb02.ptr)
531  return;
532 
533  /* FIXME: merge g_pat from vmcb01 and vmcb12. */
534  svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
535 }
Here is the caller graph for this function:

◆ nested_vnmi_enabled()

static bool nested_vnmi_enabled ( struct vcpu_svm svm)
inlinestatic

Definition at line 498 of file svm.h.

499 {
500  return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
501  (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
502 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pre_sev_run()

void pre_sev_run ( struct vcpu_svm svm,
int  cpu 
)

Definition at line 2646 of file sev.c.

2647 {
2648  struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
2649  unsigned int asid = sev_get_asid(svm->vcpu.kvm);
2650 
2651  /* Assign the asid allocated with this SEV guest */
2652  svm->asid = asid;
2653 
2654  /*
2655  * Flush guest TLB:
2656  *
 2657  * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
 2658  * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
2659  */
2660  if (sd->sev_vmcbs[asid] == svm->vmcb &&
2661  svm->vcpu.arch.last_vmentry_cpu == cpu)
2662  return;
2663 
2664  sd->sev_vmcbs[asid] = svm->vmcb;
2665  svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 2666  vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
 2667 }
static unsigned int sev_get_asid(struct kvm *kvm)
Definition: sev.c:197
struct vmcb ** sev_vmcbs
Definition: svm.h:309
u32 asid
Definition: svm.h:215
Here is the call graph for this function:
Here is the caller graph for this function:
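
The two flush conditions in the comment collapse into a single predicate: flush unless this exact VMCB was the last one run for the ASID on this very CPU. A standalone sketch of that test follows; the helper and parameter names are illustrative, not KVM's.

    #include <stdbool.h>

    /*
     * Flush is needed when (1) a different VMCB is about to run under the same
     * ASID on this CPU, or (2) the same VMCB last ran on another CPU.
     */
    static bool sev_asid_needs_flush(const void *last_vmcb_for_asid,
                                     const void *vmcb, int last_cpu, int cpu)
    {
            return last_vmcb_for_asid != vmcb || last_cpu != cpu;
    }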

◆ recalc_intercepts()

void recalc_intercepts ( struct vcpu_svm svm)

Definition at line 122 of file nested.c.

123 {
124  struct vmcb_control_area *c, *h;
125  struct vmcb_ctrl_area_cached *g;
126  unsigned int i;
127 
 128  vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 129 
130  if (!is_guest_mode(&svm->vcpu))
131  return;
132 
133  c = &svm->vmcb->control;
134  h = &svm->vmcb01.ptr->control;
135  g = &svm->nested.ctl;
136 
137  for (i = 0; i < MAX_INTERCEPT; i++)
138  c->intercepts[i] = h->intercepts[i];
139 
140  if (g->int_ctl & V_INTR_MASKING_MASK) {
141  /*
142  * If L2 is active and V_INTR_MASKING is enabled in vmcb12,
143  * disable intercept of CR8 writes as L2's CR8 does not affect
144  * any interrupt KVM may want to inject.
145  *
146  * Similarly, disable intercept of virtual interrupts (used to
147  * detect interrupt windows) if the saved RFLAGS.IF is '0', as
148  * the effective RFLAGS.IF for L1 interrupts will never be set
149  * while L2 is running (L2's RFLAGS.IF doesn't affect L1 IRQs).
150  */
151  vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
152  if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
153  vmcb_clr_intercept(c, INTERCEPT_VINTR);
154  }
155 
156  /*
157  * We want to see VMMCALLs from a nested guest only when Hyper-V L2 TLB
158  * flush feature is enabled.
159  */
 160  if (!nested_svm_l2_tlb_flush_enabled(&svm->vcpu))
 161  vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
162 
163  for (i = 0; i < MAX_INTERCEPT; i++)
164  c->intercepts[i] |= g->intercepts[i];
165 
166  /* If SMI is not intercepted, ignore guest SMI intercept as well */
167  if (!intercept_smi)
168  vmcb_clr_intercept(c, INTERCEPT_SMI);
169 
 170  if (nested_vmcb_needs_vls_intercept(svm)) {
 171  /*
172  * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
173  * we must intercept these instructions to correctly
174  * emulate them in case L1 doesn't intercept them.
175  */
176  vmcb_set_intercept(c, INTERCEPT_VMLOAD);
177  vmcb_set_intercept(c, INTERCEPT_VMSAVE);
178  } else {
179  WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
180  }
181 }
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
Definition: nested.c:108
bool intercept_smi
Definition: svm.c:235
static void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
Definition: svm.h:379
Here is the call graph for this function:
Here is the caller graph for this function:
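
Stripped of the special cases, the merge above is a copy of the host (L0) intercept vectors followed by a bitwise OR of L1's cached vectors. A minimal sketch of that core; the function and parameter names are illustrative, not the kernel's.

    #include <stdint.h>
    #include <stddef.h>

    /* Start from the host's intercepts, then OR in everything L1 requested. */
    static void merge_intercepts(uint32_t *c, const uint32_t *h,
                                 const uint32_t *g, size_t nvecs)
    {
            size_t i;

            for (i = 0; i < nvecs; i++)
                    c[i] = h[i];

            for (i = 0; i < nvecs; i++)
                    c[i] |= g[i];
    }

Individual bits are then cleared or forced with vmcb_clr_intercept()/vmcb_set_intercept(), exactly as the function does for CR8 writes, VINTR, VMMCALL, SMI and VMLOAD/VMSAVE.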

◆ set_exception_intercept()

static void set_exception_intercept ( struct vcpu_svm svm,
u32  bit 
)
inlinestatic

Definition at line 403 of file svm.h.

404 {
405  struct vmcb *vmcb = svm->vmcb01.ptr;
406 
407  WARN_ON_ONCE(bit >= 32);
408  vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
409 
410  recalc_intercepts(svm);
411 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ set_msr_interception()

void set_msr_interception ( struct kvm_vcpu *  vcpu,
u32 *  msrpm,
u32  msr,
int  read,
int  write 
)

Definition at line 859 of file svm.c.

861 {
862  set_shadow_msr_intercept(vcpu, msr, read, write);
863  set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
864 }
static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, int write)
Definition: svm.c:768
static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
Definition: svm.c:822
Here is the call graph for this function:
Here is the caller graph for this function:
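
Underneath, the MSR permission map holds two bits per MSR: a read-intercept bit followed by a write-intercept bit. The sketch below shows only that layout; the helper and parameter names are illustrative, and note that set_msr_interception() itself takes read/write flags with the opposite sense, where 1 means the access is passed through rather than intercepted.

    #include <stdint.h>

    /*
     * Set or clear the two permission bits for one MSR, given the byte that
     * holds it and its 2-bit slot (0-3) within that byte.
     */
    static void msrpm_update_slot(uint8_t *msrpm, uint32_t byte_offset, uint32_t slot,
                                  int intercept_read, int intercept_write)
    {
            uint8_t read_bit  = 1u << (slot * 2);
            uint8_t write_bit = 1u << (slot * 2 + 1);

            if (intercept_read)
                    msrpm[byte_offset] |= read_bit;
            else
                    msrpm[byte_offset] &= ~read_bit;

            if (intercept_write)
                    msrpm[byte_offset] |= write_bit;
            else
                    msrpm[byte_offset] &= ~write_bit;
    }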

◆ sev_cpu_init()

int sev_cpu_init ( struct svm_cpu_data sd)

Definition at line 2313 of file sev.c.

2314 {
2315  if (!sev_enabled)
2316  return 0;
2317 
2318  sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
2319  if (!sd->sev_vmcbs)
2320  return -ENOMEM;
2321 
2322  return 0;
2323 }
static unsigned int nr_asids
Definition: sev.c:74
#define sev_enabled
Definition: sev.c:63
Here is the caller graph for this function:

◆ sev_es_guest()

static __always_inline bool sev_es_guest ( struct kvm *  kvm)
static

Definition at line 332 of file svm.h.

333 {
334 #ifdef CONFIG_KVM_AMD_SEV
335  struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
336 
337  return sev->es_active && !WARN_ON_ONCE(!sev->active);
338 #else
339  return false;
340 #endif
341 }
bool active
Definition: svm.h:80
bool es_active
Definition: svm.h:81
struct kvm_sev_info sev_info
Definition: svm.h:104
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_es_prepare_switch_to_guest()

void sev_es_prepare_switch_to_guest ( struct sev_es_save_area *  hostsa)

Definition at line 3119 of file sev.c.

3120 {
3121  /*
3122  * All host state for SEV-ES guests is categorized into three swap types
3123  * based on how it is handled by hardware during a world switch:
3124  *
3125  * A: VMRUN: Host state saved in host save area
3126  * VMEXIT: Host state loaded from host save area
3127  *
3128  * B: VMRUN: Host state _NOT_ saved in host save area
3129  * VMEXIT: Host state loaded from host save area
3130  *
3131  * C: VMRUN: Host state _NOT_ saved in host save area
3132  * VMEXIT: Host state initialized to default(reset) values
3133  *
3134  * Manually save type-B state, i.e. state that is loaded by VMEXIT but
3135  * isn't saved by VMRUN, that isn't already saved by VMSAVE (performed
3136  * by common SVM code).
3137  */
3138  hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
3139  hostsa->pkru = read_pkru();
3140  hostsa->xss = host_xss;
3141 
3142  /*
3143  * If DebugSwap is enabled, debug registers are loaded but NOT saved by
3144  * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
3145  * saves and loads debug registers (Type-A).
3146  */
 3147  if (sev_es_debug_swap_enabled) {
 3148  hostsa->dr0 = native_get_debugreg(0);
3149  hostsa->dr1 = native_get_debugreg(1);
3150  hostsa->dr2 = native_get_debugreg(2);
3151  hostsa->dr3 = native_get_debugreg(3);
3152  hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
3153  hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
3154  hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
3155  hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
3156  }
3157 }
#define sev_es_debug_swap_enabled
Definition: sev.c:65
u64 __read_mostly host_xss
Definition: x86.c:238
Here is the caller graph for this function:

◆ sev_es_string_io()

int sev_es_string_io ( struct vcpu_svm svm,
int  size,
unsigned int  port,
int  in 
)

Definition at line 2963 of file sev.c.

2964 {
2965  int count;
2966  int bytes;
2967  int r;
2968 
2969  if (svm->vmcb->control.exit_info_2 > INT_MAX)
2970  return -EINVAL;
2971 
2972  count = svm->vmcb->control.exit_info_2;
2973  if (unlikely(check_mul_overflow(count, size, &bytes)))
2974  return -EINVAL;
2975 
2976  r = setup_vmgexit_scratch(svm, in, bytes);
2977  if (r)
2978  return r;
2979 
2980  return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
2981  count, in);
2982 }
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
Definition: sev.c:2670
void * ghcb_sa
Definition: svm.h:203
struct vcpu_sev_es_state sev_es
Definition: svm.h:287
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, unsigned int port, void *data, unsigned int count, int in)
Definition: x86.c:13876
Here is the call graph for this function:
Here is the caller graph for this function:
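
The count * size guard matters because exit_info_2 is guest-controlled. A small userspace sketch of the same check, built on the compiler builtin that the kernel's check_mul_overflow() wraps:

    #include <stdio.h>

    /* Mirror the guard: refuse any request whose byte count overflows an int. */
    static int checked_io_bytes(int count, int size, int *bytes)
    {
            if (__builtin_mul_overflow(count, size, bytes))
                    return -1;
            return 0;
    }

    int main(void)
    {
            int bytes;

            if (checked_io_bytes(0x40000000, 8, &bytes))
                    puts("rejected: count * size overflows int");
            else
                    printf("ok: %d bytes\n", bytes);
            return 0;
    }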

◆ sev_es_unmap_ghcb()

void sev_es_unmap_ghcb ( struct vcpu_svm svm)

Definition at line 2614 of file sev.c.

2615 {
2616  if (!svm->sev_es.ghcb)
2617  return;
2618 
2619  if (svm->sev_es.ghcb_sa_free) {
2620  /*
2621  * The scratch area lives outside the GHCB, so there is a
2622  * buffer that, depending on the operation performed, may
2623  * need to be synced, then freed.
2624  */
2625  if (svm->sev_es.ghcb_sa_sync) {
2626  kvm_write_guest(svm->vcpu.kvm,
2627  svm->sev_es.sw_scratch,
2628  svm->sev_es.ghcb_sa,
2629  svm->sev_es.ghcb_sa_len);
2630  svm->sev_es.ghcb_sa_sync = false;
2631  }
2632 
2633  kvfree(svm->sev_es.ghcb_sa);
2634  svm->sev_es.ghcb_sa = NULL;
2635  svm->sev_es.ghcb_sa_free = false;
2636  }
2637 
2638  trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);
2639 
2640  sev_es_sync_to_ghcb(svm);
2641 
2642  kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
2643  svm->sev_es.ghcb = NULL;
2644 }
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len)
Definition: kvm_main.c:3449
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
Definition: sev.c:2416
u32 ghcb_sa_len
Definition: svm.h:204
struct kvm_host_map ghcb_map
Definition: svm.h:198
u64 sw_scratch
Definition: svm.h:202
struct ghcb * ghcb
Definition: svm.h:196
bool ghcb_sa_free
Definition: svm.h:206
bool ghcb_sa_sync
Definition: svm.h:205
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_es_vcpu_reset()

void sev_es_vcpu_reset ( struct vcpu_svm svm)

Definition at line 3108 of file sev.c.

3109 {
3110  /*
3111  * Set the GHCB MSR value as per the GHCB specification when emulating
3112  * vCPU RESET for an SEV-ES guest.
3113  */
3114  set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
 3115  GHCB_VERSION_MIN,
 3116  sev_enc_bit));
3117 }
static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
Definition: sev.c:2766
static u8 sev_enc_bit
Definition: sev.c:68
#define GHCB_VERSION_MIN
Definition: svm.h:668
#define GHCB_VERSION_MAX
Definition: svm.h:667
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_free_vcpu()

void sev_free_vcpu ( struct kvm_vcpu *  vcpu)

Definition at line 2373 of file sev.c.

2374 {
2375  struct vcpu_svm *svm;
2376 
2377  if (!sev_es_guest(vcpu->kvm))
2378  return;
2379 
2380  svm = to_svm(vcpu);
2381 
2382  if (vcpu->arch.guest_state_protected)
 2383  sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
 2384 
2385  __free_page(virt_to_page(svm->sev_es.vmsa));
2386 
2387  if (svm->sev_es.ghcb_sa_free)
2388  kvfree(svm->sev_es.ghcb_sa);
2389 }
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
Definition: sev.c:2329
struct sev_es_save_area * vmsa
Definition: svm.h:195
static __always_inline bool sev_es_guest(struct kvm *kvm)
Definition: svm.h:332
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_guest()

static __always_inline bool sev_guest ( struct kvm *  kvm)
static

Definition at line 321 of file svm.h.

322 {
323 #ifdef CONFIG_KVM_AMD_SEV
324  struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
325 
326  return sev->active;
327 #else
328  return false;
329 #endif
330 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_guest_memory_reclaimed()

void sev_guest_memory_reclaimed ( struct kvm *  kvm)

Definition at line 2365 of file sev.c.

2366 {
2367  if (!sev_guest(kvm))
2368  return;
2369 
2370  wbinvd_on_all_cpus();
2371 }
static __always_inline bool sev_guest(struct kvm *kvm)
Definition: svm.h:321
Here is the call graph for this function:

◆ sev_handle_vmgexit()

int sev_handle_vmgexit ( struct kvm_vcpu *  vcpu)

Definition at line 2857 of file sev.c.

2858 {
2859  struct vcpu_svm *svm = to_svm(vcpu);
2860  struct vmcb_control_area *control = &svm->vmcb->control;
2861  u64 ghcb_gpa, exit_code;
2862  int ret;
2863 
2864  /* Validate the GHCB */
2865  ghcb_gpa = control->ghcb_gpa;
2866  if (ghcb_gpa & GHCB_MSR_INFO_MASK)
2867  return sev_handle_vmgexit_msr_protocol(svm);
2868 
2869  if (!ghcb_gpa) {
2870  vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
2871 
2872  /* Without a GHCB, just return right back to the guest */
2873  return 1;
2874  }
2875 
2876  if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
2877  /* Unable to map GHCB from guest */
2878  vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
2879  ghcb_gpa);
2880 
2881  /* Without a GHCB, just return right back to the guest */
2882  return 1;
2883  }
2884 
2885  svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
2886 
2887  trace_kvm_vmgexit_enter(vcpu->vcpu_id, svm->sev_es.ghcb);
2888 
2889  sev_es_sync_from_ghcb(svm);
2890  ret = sev_es_validate_vmgexit(svm);
2891  if (ret)
2892  return ret;
2893 
2894  ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 0);
2895  ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 0);
2896 
2897  exit_code = kvm_ghcb_get_sw_exit_code(control);
2898  switch (exit_code) {
2899  case SVM_VMGEXIT_MMIO_READ:
2900  ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
2901  if (ret)
2902  break;
2903 
2904  ret = kvm_sev_es_mmio_read(vcpu,
2905  control->exit_info_1,
2906  control->exit_info_2,
2907  svm->sev_es.ghcb_sa);
2908  break;
2909  case SVM_VMGEXIT_MMIO_WRITE:
2910  ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
2911  if (ret)
2912  break;
2913 
2914  ret = kvm_sev_es_mmio_write(vcpu,
2915  control->exit_info_1,
2916  control->exit_info_2,
2917  svm->sev_es.ghcb_sa);
2918  break;
2919  case SVM_VMGEXIT_NMI_COMPLETE:
2920  ++vcpu->stat.nmi_window_exits;
2921  svm->nmi_masked = false;
2922  kvm_make_request(KVM_REQ_EVENT, vcpu);
2923  ret = 1;
2924  break;
2925  case SVM_VMGEXIT_AP_HLT_LOOP:
2926  ret = kvm_emulate_ap_reset_hold(vcpu);
2927  break;
2928  case SVM_VMGEXIT_AP_JUMP_TABLE: {
2929  struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
2930 
2931  switch (control->exit_info_1) {
2932  case 0:
2933  /* Set AP jump table address */
2934  sev->ap_jump_table = control->exit_info_2;
2935  break;
2936  case 1:
2937  /* Get AP jump table address */
2938  ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, sev->ap_jump_table);
2939  break;
2940  default:
2941  pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2942  control->exit_info_1);
2943  ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 2);
2944  ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, GHCB_ERR_INVALID_INPUT);
2945  }
2946 
2947  ret = 1;
2948  break;
2949  }
2950  case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2951  vcpu_unimpl(vcpu,
2952  "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2953  control->exit_info_1, control->exit_info_2);
2954  ret = -EINVAL;
2955  break;
2956  default:
2957  ret = svm_invoke_exit_handler(vcpu, exit_code);
2958  }
2959 
2960  return ret;
2961 }
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
Definition: sev.c:2489
static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
Definition: sev.c:2484
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
Definition: sev.c:2771
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
Definition: sev.c:2435
u64 ap_jump_table
Definition: svm.h:87
bool nmi_masked
Definition: svm.h:241
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
Definition: svm.c:3453
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
Definition: x86.c:9868
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, void *data)
Definition: x86.c:13722
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, void *data)
Definition: x86.c:13761
Here is the call graph for this function:

◆ sev_hardware_setup()

void __init sev_hardware_setup ( void  )

Definition at line 2193 of file sev.c.

2194 {
2195 #ifdef CONFIG_KVM_AMD_SEV
2196  unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
2197  bool sev_es_supported = false;
2198  bool sev_supported = false;
2199 
2200  if (!sev_enabled || !npt_enabled || !nrips)
2201  goto out;
2202 
2203  /*
2204  * SEV must obviously be supported in hardware. Sanity check that the
2205  * CPU supports decode assists, which is mandatory for SEV guests to
2206  * support instruction emulation. Ditto for flushing by ASID, as SEV
2207  * guests are bound to a single ASID, i.e. KVM can't rotate to a new
2208  * ASID to effect a TLB flush.
2209  */
2210  if (!boot_cpu_has(X86_FEATURE_SEV) ||
2211  WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)) ||
2212  WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_FLUSHBYASID)))
2213  goto out;
2214 
2215  /* Retrieve SEV CPUID information */
2216  cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
2217 
2218  /* Set encryption bit location for SEV-ES guests */
2219  sev_enc_bit = ebx & 0x3f;
2220 
2221  /* Maximum number of encrypted guests supported simultaneously */
2222  max_sev_asid = ecx;
2223  if (!max_sev_asid)
2224  goto out;
2225 
2226  /* Minimum ASID value that should be used for SEV guest */
2227  min_sev_asid = edx;
2228  sev_me_mask = 1UL << (ebx & 0x3f);
2229 
2230  /*
2231  * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
2232  * even though it's never used, so that the bitmap is indexed by the
2233  * actual ASID.
2234  */
2235  nr_asids = max_sev_asid + 1;
2236  sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2237  if (!sev_asid_bitmap)
2238  goto out;
2239 
2240  sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
2241  if (!sev_reclaim_asid_bitmap) {
2242  bitmap_free(sev_asid_bitmap);
2243  sev_asid_bitmap = NULL;
2244  goto out;
2245  }
2246 
2247  if (min_sev_asid <= max_sev_asid) {
2248  sev_asid_count = max_sev_asid - min_sev_asid + 1;
2249  WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
2250  }
2251  sev_supported = true;
2252 
2253  /* SEV-ES support requested? */
2254  if (!sev_es_enabled)
2255  goto out;
2256 
2257  /*
2258  * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
2259  * instruction stream, i.e. can't emulate in response to a #NPF and
2260  * instead relies on #NPF(RSVD) being reflected into the guest as #VC
2261  * (the guest can then do a #VMGEXIT to request MMIO emulation).
2262  */
2263  if (!enable_mmio_caching)
2264  goto out;
2265 
2266  /* Does the CPU support SEV-ES? */
2267  if (!boot_cpu_has(X86_FEATURE_SEV_ES))
2268  goto out;
2269 
2270  /* Has the system been allocated ASIDs for SEV-ES? */
2271  if (min_sev_asid == 1)
2272  goto out;
2273 
2274  sev_es_asid_count = min_sev_asid - 1;
2275  WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
2276  sev_es_supported = true;
2277 
2278 out:
2279  if (boot_cpu_has(X86_FEATURE_SEV))
2280  pr_info("SEV %s (ASIDs %u - %u)\n",
2281  sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
2282  "unusable" :
 2283  "disabled",
 2284  min_sev_asid, max_sev_asid);
2285  if (boot_cpu_has(X86_FEATURE_SEV_ES))
2286  pr_info("SEV-ES %s (ASIDs %u - %u)\n",
2287  sev_es_supported ? "enabled" : "disabled",
2288  min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
2289 
2290  sev_enabled = sev_supported;
2291  sev_es_enabled = sev_es_supported;
2292  if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
2293  !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
2294  sev_es_debug_swap_enabled = false;
2295 #endif
2296 }
unsigned int max_sev_asid
Definition: sev.c:71
#define sev_es_enabled
Definition: sev.c:64
static unsigned long sev_me_mask
Definition: sev.c:73
#define MISC_CG_RES_SEV_ES
Definition: sev.c:47
#define MISC_CG_RES_SEV
Definition: sev.c:46
static unsigned long * sev_asid_bitmap
Definition: sev.c:75
static unsigned int min_sev_asid
Definition: sev.c:72
static unsigned long * sev_reclaim_asid_bitmap
Definition: sev.c:76
bool __read_mostly enable_mmio_caching
Definition: spte.c:22
int nrips
Definition: svm.c:206
Here is the caller graph for this function:
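
The CPUID decoding above can be reproduced from userspace to see what the host advertises; a minimal sketch using GCC's cpuid.h helper, with the field meanings taken from the comments in the listing:

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0x8000001f not available");
                    return 1;
            }

            printf("C-bit position : %u\n", ebx & 0x3f); /* sev_enc_bit  */
            printf("max SEV ASID   : %u\n", ecx);        /* max_sev_asid */
            printf("min SEV ASID   : %u\n", edx);        /* min_sev_asid */
            return 0;
    }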

◆ sev_hardware_unsetup()

void sev_hardware_unsetup ( void  )

Definition at line 2298 of file sev.c.

2299 {
2300  if (!sev_enabled)
2301  return;
2302 
2303  /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
 2304  sev_flush_asids(1, max_sev_asid);
 2305 
2306  bitmap_free(sev_asid_bitmap);
2307  bitmap_free(sev_reclaim_asid_bitmap);
2308 
2309  misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
2310  misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
2311 }
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
Definition: sev.c:87
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_init_vmcb()

void sev_init_vmcb ( struct vcpu_svm svm)

Definition at line 3093 of file sev.c.

3094 {
3095  svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
3096  clr_exception_intercept(svm, UD_VECTOR);
3097 
3098  /*
3099  * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
3100  * KVM can't decrypt guest memory to decode the faulting instruction.
3101  */
3102  clr_exception_intercept(svm, GP_VECTOR);
3103 
3104  if (sev_es_guest(svm->vcpu.kvm))
3105  sev_es_init_vmcb(svm);
3106 }
static void sev_es_init_vmcb(struct vcpu_svm *svm)
Definition: sev.c:3029
static void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
Definition: svm.h:413
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_mem_enc_ioctl()

int sev_mem_enc_ioctl ( struct kvm *  kvm,
void __user *  argp 
)

Definition at line 1862 of file sev.c.

1863 {
1864  struct kvm_sev_cmd sev_cmd;
1865  int r;
1866 
1867  if (!sev_enabled)
1868  return -ENOTTY;
1869 
1870  if (!argp)
1871  return 0;
1872 
1873  if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1874  return -EFAULT;
1875 
1876  mutex_lock(&kvm->lock);
1877 
1878  /* Only the enc_context_owner handles some memory enc operations. */
1879  if (is_mirroring_enc_context(kvm) &&
1880  !is_cmd_allowed_from_mirror(sev_cmd.id)) {
1881  r = -EINVAL;
1882  goto out;
1883  }
1884 
1885  switch (sev_cmd.id) {
1886  case KVM_SEV_ES_INIT:
1887  if (!sev_es_enabled) {
1888  r = -ENOTTY;
1889  goto out;
1890  }
1891  fallthrough;
1892  case KVM_SEV_INIT:
1893  r = sev_guest_init(kvm, &sev_cmd);
1894  break;
1895  case KVM_SEV_LAUNCH_START:
1896  r = sev_launch_start(kvm, &sev_cmd);
1897  break;
1898  case KVM_SEV_LAUNCH_UPDATE_DATA:
1899  r = sev_launch_update_data(kvm, &sev_cmd);
1900  break;
1901  case KVM_SEV_LAUNCH_UPDATE_VMSA:
1902  r = sev_launch_update_vmsa(kvm, &sev_cmd);
1903  break;
1904  case KVM_SEV_LAUNCH_MEASURE:
1905  r = sev_launch_measure(kvm, &sev_cmd);
1906  break;
1907  case KVM_SEV_LAUNCH_FINISH:
1908  r = sev_launch_finish(kvm, &sev_cmd);
1909  break;
1910  case KVM_SEV_GUEST_STATUS:
1911  r = sev_guest_status(kvm, &sev_cmd);
1912  break;
1913  case KVM_SEV_DBG_DECRYPT:
1914  r = sev_dbg_crypt(kvm, &sev_cmd, true);
1915  break;
1916  case KVM_SEV_DBG_ENCRYPT:
1917  r = sev_dbg_crypt(kvm, &sev_cmd, false);
1918  break;
1919  case KVM_SEV_LAUNCH_SECRET:
1920  r = sev_launch_secret(kvm, &sev_cmd);
1921  break;
1922  case KVM_SEV_GET_ATTESTATION_REPORT:
1923  r = sev_get_attestation_report(kvm, &sev_cmd);
1924  break;
1925  case KVM_SEV_SEND_START:
1926  r = sev_send_start(kvm, &sev_cmd);
1927  break;
1928  case KVM_SEV_SEND_UPDATE_DATA:
1929  r = sev_send_update_data(kvm, &sev_cmd);
1930  break;
1931  case KVM_SEV_SEND_FINISH:
1932  r = sev_send_finish(kvm, &sev_cmd);
1933  break;
1934  case KVM_SEV_SEND_CANCEL:
1935  r = sev_send_cancel(kvm, &sev_cmd);
1936  break;
1937  case KVM_SEV_RECEIVE_START:
1938  r = sev_receive_start(kvm, &sev_cmd);
1939  break;
1940  case KVM_SEV_RECEIVE_UPDATE_DATA:
1941  r = sev_receive_update_data(kvm, &sev_cmd);
1942  break;
1943  case KVM_SEV_RECEIVE_FINISH:
1944  r = sev_receive_finish(kvm, &sev_cmd);
1945  break;
1946  default:
1947  r = -EINVAL;
1948  goto out;
1949  }
1950 
1951  if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1952  r = -EFAULT;
1953 
1954 out:
1955  mutex_unlock(&kvm->lock);
1956  return r;
1957 }
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1295
static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1380
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1404
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:511
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:328
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:670
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
Definition: sev.c:941
static bool is_mirroring_enc_context(struct kvm *kvm)
Definition: sev.c:114
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1025
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:756
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:768
static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1478
static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1392
static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1555
static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1100
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:253
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:694
static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
Definition: sev.c:1182
static bool is_cmd_allowed_from_mirror(u32 cmd_id)
Definition: sev.c:1567
Here is the call graph for this function:
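
From userspace, every branch of this handler is reached through the KVM_MEMORY_ENCRYPT_OP ioctl on the VM file descriptor with a struct kvm_sev_cmd. A minimal sketch issuing KVM_SEV_INIT; the /dev/sev path and the bare-bones error handling are assumptions of the example.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int sev_vm_init(int vm_fd)
    {
            struct kvm_sev_cmd cmd;
            int sev_fd;

            sev_fd = open("/dev/sev", O_RDWR);
            if (sev_fd < 0)
                    return -1;

            memset(&cmd, 0, sizeof(cmd));
            cmd.id = KVM_SEV_INIT;
            cmd.sev_fd = sev_fd;

            if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
                    fprintf(stderr, "KVM_SEV_INIT failed, fw error %u\n", cmd.error);
                    close(sev_fd);
                    return -1;
            }

            close(sev_fd);
            return 0;
    }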

◆ sev_mem_enc_register_region()

int sev_mem_enc_register_region ( struct kvm *  kvm,
struct kvm_enc_region *  range 
)

Definition at line 1959 of file sev.c.

1961 {
1962  struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1963  struct enc_region *region;
1964  int ret = 0;
1965 
1966  if (!sev_guest(kvm))
1967  return -ENOTTY;
1968 
1969  /* If kvm is mirroring encryption context it isn't responsible for it */
1970  if (is_mirroring_enc_context(kvm))
1971  return -EINVAL;
1972 
1973  if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1974  return -EINVAL;
1975 
1976  region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1977  if (!region)
1978  return -ENOMEM;
1979 
1980  mutex_lock(&kvm->lock);
1981  region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1982  if (IS_ERR(region->pages)) {
1983  ret = PTR_ERR(region->pages);
1984  mutex_unlock(&kvm->lock);
1985  goto e_free;
1986  }
1987 
1988  /*
1989  * The guest may change the memory encryption attribute from C=0 -> C=1
 1990  * or vice versa for this memory range. Let's make sure caches are
 1991  * flushed to ensure that guest data gets written into memory with
 1992  * the correct C-bit. Note, this must be done before dropping kvm->lock,
1993  * as region and its array of pages can be freed by a different task
1994  * once kvm->lock is released.
1995  */
1996  sev_clflush_pages(region->pages, region->npages);
1997 
1998  region->uaddr = range->addr;
1999  region->size = range->size;
2000 
2001  list_add_tail(&region->list, &sev->regions_list);
2002  mutex_unlock(&kvm->lock);
2003 
2004  return ret;
2005 
2006 e_free:
2007  kfree(region);
2008  return ret;
2009 }
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
Definition: sev.c:473
static struct page ** sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long ulen, unsigned long *n, int write)
Definition: sev.c:400
Definition: sev.c:78
unsigned long size
Definition: sev.c:83
unsigned long npages
Definition: sev.c:80
struct page ** pages
Definition: sev.c:81
unsigned long uaddr
Definition: sev.c:82
struct list_head list
Definition: sev.c:79
struct list_head regions_list
Definition: svm.h:86
Here is the call graph for this function:
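
The matching userspace call is the KVM_MEMORY_ENCRYPT_REG_REGION ioctl with a struct kvm_enc_region describing the host virtual range; a minimal sketch (the helper name is illustrative):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Pin and register one guest-memory range so its pages cannot move while
     * they may hold ciphertext. */
    static int sev_register_region(int vm_fd, void *uaddr, uint64_t size)
    {
            struct kvm_enc_region region = {
                    .addr = (uintptr_t)uaddr,
                    .size = size,
            };

            return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
    }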

◆ sev_mem_enc_unregister_region()

int sev_mem_enc_unregister_region ( struct kvm *  kvm,
struct kvm_enc_region *  range 
)

Definition at line 2035 of file sev.c.

2037 {
2038  struct enc_region *region;
2039  int ret;
2040 
2041  /* If kvm is mirroring encryption context it isn't responsible for it */
2042  if (is_mirroring_enc_context(kvm))
2043  return -EINVAL;
2044 
2045  mutex_lock(&kvm->lock);
2046 
2047  if (!sev_guest(kvm)) {
2048  ret = -ENOTTY;
2049  goto failed;
2050  }
2051 
2052  region = find_enc_region(kvm, range);
2053  if (!region) {
2054  ret = -EINVAL;
2055  goto failed;
2056  }
2057 
2058  /*
2059  * Ensure that all guest tagged cache entries are flushed before
2060  * releasing the pages back to the system for use. CLFLUSH will
2061  * not do this, so issue a WBINVD.
2062  */
2063  wbinvd_on_all_cpus();
2064 
2065  __unregister_enc_region_locked(kvm, region);
2066 
2067  mutex_unlock(&kvm->lock);
2068  return 0;
2069 
2070 failed:
2071  mutex_unlock(&kvm->lock);
2072  return ret;
2073 }
static void __unregister_enc_region_locked(struct kvm *kvm, struct enc_region *region)
Definition: sev.c:2027
static struct enc_region * find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
Definition: sev.c:2012
Here is the call graph for this function:

◆ sev_set_cpu_caps()

void __init sev_set_cpu_caps ( void  )

Definition at line 2185 of file sev.c.

2186 {
2187  if (!sev_enabled)
2188  kvm_cpu_cap_clear(X86_FEATURE_SEV);
2189  if (!sev_es_enabled)
2190  kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
2191 }
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
Definition: cpuid.h:197
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_vcpu_after_set_cpuid()

void sev_vcpu_after_set_cpuid ( struct vcpu_svm svm)

Definition at line 3015 of file sev.c.

3016 {
3017  struct kvm_vcpu *vcpu = &svm->vcpu;
3018  struct kvm_cpuid_entry2 *best;
3019 
3020  /* For sev guests, the memory encryption bit is not reserved in CR3. */
3021  best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
3022  if (best)
3023  vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
3024 
3025  if (sev_es_guest(svm->vcpu.kvm))
 3026  sev_es_vcpu_after_set_cpuid(svm);
 3027 }
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
Definition: cpuid.c:1455
static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
Definition: sev.c:2984
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_vcpu_deliver_sipi_vector()

void sev_vcpu_deliver_sipi_vector ( struct kvm_vcpu *  vcpu,
u8  vector 
)

Definition at line 3159 of file sev.c.

3160 {
3161  struct vcpu_svm *svm = to_svm(vcpu);
3162 
3163  /* First SIPI: Use the values as initially set by the VMM */
3164  if (!svm->sev_es.received_first_sipi) {
3165  svm->sev_es.received_first_sipi = true;
3166  return;
3167  }
3168 
3169  /*
3170  * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
3171  * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
3172  * non-zero value.
3173  */
3174  if (!svm->sev_es.ghcb)
3175  return;
3176 
3177  ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
3178 }
bool received_first_sipi
Definition: svm.h:199
Here is the caller graph for this function:

◆ sev_vm_copy_enc_context_from()

int sev_vm_copy_enc_context_from ( struct kvm *  kvm,
unsigned int  source_fd 
)

Definition at line 2075 of file sev.c.

2076 {
2077  struct fd f = fdget(source_fd);
2078  struct kvm *source_kvm;
2079  struct kvm_sev_info *source_sev, *mirror_sev;
2080  int ret;
2081 
2082  if (!f.file)
2083  return -EBADF;
2084 
2085  if (!file_is_kvm(f.file)) {
2086  ret = -EBADF;
2087  goto e_source_fput;
2088  }
2089 
2090  source_kvm = f.file->private_data;
2091  ret = sev_lock_two_vms(kvm, source_kvm);
2092  if (ret)
2093  goto e_source_fput;
2094 
2095  /*
2096  * Mirrors of mirrors should work, but let's not get silly. Also
2097  * disallow out-of-band SEV/SEV-ES init if the target is already an
2098  * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
2099  * created after SEV/SEV-ES initialization, e.g. to init intercepts.
2100  */
2101  if (sev_guest(kvm) || !sev_guest(source_kvm) ||
2102  is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
2103  ret = -EINVAL;
2104  goto e_unlock;
2105  }
2106 
2107  /*
2108  * The mirror kvm holds an enc_context_owner ref so its asid can't
2109  * disappear until we're done with it
2110  */
2111  source_sev = &to_kvm_svm(source_kvm)->sev_info;
2112  kvm_get_kvm(source_kvm);
2113  mirror_sev = &to_kvm_svm(kvm)->sev_info;
2114  list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);
2115 
2116  /* Set enc_context_owner and copy its encryption context over */
2117  mirror_sev->enc_context_owner = source_kvm;
2118  mirror_sev->active = true;
2119  mirror_sev->asid = source_sev->asid;
2120  mirror_sev->fd = source_sev->fd;
2121  mirror_sev->es_active = source_sev->es_active;
2122  mirror_sev->handle = source_sev->handle;
2123  INIT_LIST_HEAD(&mirror_sev->regions_list);
2124  INIT_LIST_HEAD(&mirror_sev->mirror_vms);
2125  ret = 0;
2126 
2127  /*
 2128  * Do not copy ap_jump_table, since the mirror does not share the same
 2129  * KVM context as the original and the two may have different
 2130  * memory views.
2131  */
2132 
2133 e_unlock:
2134  sev_unlock_two_vms(kvm, source_kvm);
2135 e_source_fput:
2136  fdput(f);
2137  return ret;
2138 }
void kvm_get_kvm(struct kvm *kvm)
Definition: kvm_main.c:1403
bool file_is_kvm(struct file *file)
Definition: kvm_main.c:5424
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
Definition: sev.c:1581
static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
Definition: sev.c:1616
struct kvm * enc_context_owner
Definition: svm.h:88
struct list_head mirror_entry
Definition: svm.h:90
unsigned int handle
Definition: svm.h:83
struct list_head mirror_vms
Definition: svm.h:89
unsigned int asid
Definition: svm.h:82
int fd
Definition: svm.h:84
Here is the call graph for this function:
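
Userspace triggers this path by enabling KVM_CAP_VM_COPY_ENC_CONTEXT_FROM on the mirror VM, passing the source VM's file descriptor in args[0]. A minimal sketch, assuming kernel headers that define the capability; the helper name is illustrative.

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int sev_mirror_enc_context(int mirror_vm_fd, int source_vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
                    .args[0] = source_vm_fd,
            };

            return ioctl(mirror_vm_fd, KVM_ENABLE_CAP, &cap);
    }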

◆ sev_vm_destroy()

void sev_vm_destroy ( struct kvm *  kvm)

Definition at line 2140 of file sev.c.

2141 {
2142  struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2143  struct list_head *head = &sev->regions_list;
2144  struct list_head *pos, *q;
2145 
2146  if (!sev_guest(kvm))
2147  return;
2148 
2149  WARN_ON(!list_empty(&sev->mirror_vms));
2150 
2151  /* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
2152  if (is_mirroring_enc_context(kvm)) {
2153  struct kvm *owner_kvm = sev->enc_context_owner;
2154 
2155  mutex_lock(&owner_kvm->lock);
2156  list_del(&sev->mirror_entry);
2157  mutex_unlock(&owner_kvm->lock);
2158  kvm_put_kvm(owner_kvm);
2159  return;
2160  }
2161 
2162  /*
2163  * Ensure that all guest tagged cache entries are flushed before
2164  * releasing the pages back to the system for use. CLFLUSH will
2165  * not do this, so issue a WBINVD.
2166  */
2167  wbinvd_on_all_cpus();
2168 
2169  /*
 2170  * If userspace was terminated before unregistering the memory regions,
 2171  * then let's unpin all the registered memory.
2172  */
2173  if (!list_empty(head)) {
2174  list_for_each_safe(pos, q, head) {
 2175  __unregister_enc_region_locked(kvm,
 2176  list_entry(pos, struct enc_region, list));
2177  cond_resched();
2178  }
2179  }
2180 
2181  sev_unbind_asid(kvm, sev->handle);
2182  sev_asid_free(sev);
2183 }
void kvm_put_kvm(struct kvm *kvm)
Definition: kvm_main.c:1419
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
Definition: sev.c:236
static void sev_asid_free(struct kvm_sev_info *sev)
Definition: sev.c:204
Here is the call graph for this function:
Here is the caller graph for this function:

◆ sev_vm_move_enc_context_from()

int sev_vm_move_enc_context_from ( struct kvm *  kvm,
unsigned int  source_fd 
)

Definition at line 1791 of file sev.c.

1792 {
1793  struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
1794  struct kvm_sev_info *src_sev, *cg_cleanup_sev;
1795  struct fd f = fdget(source_fd);
1796  struct kvm *source_kvm;
1797  bool charged = false;
1798  int ret;
1799 
1800  if (!f.file)
1801  return -EBADF;
1802 
1803  if (!file_is_kvm(f.file)) {
1804  ret = -EBADF;
1805  goto out_fput;
1806  }
1807 
1808  source_kvm = f.file->private_data;
1809  ret = sev_lock_two_vms(kvm, source_kvm);
1810  if (ret)
1811  goto out_fput;
1812 
1813  if (sev_guest(kvm) || !sev_guest(source_kvm)) {
1814  ret = -EINVAL;
1815  goto out_unlock;
1816  }
1817 
1818  src_sev = &to_kvm_svm(source_kvm)->sev_info;
1819 
1820  dst_sev->misc_cg = get_current_misc_cg();
1821  cg_cleanup_sev = dst_sev;
1822  if (dst_sev->misc_cg != src_sev->misc_cg) {
1823  ret = sev_misc_cg_try_charge(dst_sev);
1824  if (ret)
1825  goto out_dst_cgroup;
1826  charged = true;
1827  }
1828 
 1829  ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_TARGET);
 1830  if (ret)
1831  goto out_dst_cgroup;
 1832  ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_SOURCE);
 1833  if (ret)
1834  goto out_dst_vcpu;
1835 
1836  ret = sev_check_source_vcpus(kvm, source_kvm);
1837  if (ret)
1838  goto out_source_vcpu;
1839 
1840  sev_migrate_from(kvm, source_kvm);
1841  kvm_vm_dead(source_kvm);
1842  cg_cleanup_sev = src_sev;
1843  ret = 0;
1844 
1845 out_source_vcpu:
1846  sev_unlock_vcpus_for_migration(source_kvm);
1847 out_dst_vcpu:
 1848  sev_unlock_vcpus_for_migration(kvm);
 1849 out_dst_cgroup:
1850  /* Operates on the source on success, on the destination on failure. */
1851  if (charged)
1852  sev_misc_cg_uncharge(cg_cleanup_sev);
1853  put_misc_cg(cg_cleanup_sev->misc_cg);
1854  cg_cleanup_sev->misc_cg = NULL;
1855 out_unlock:
1856  sev_unlock_two_vms(kvm, source_kvm);
1857 out_fput:
1858  fdput(f);
1859  return ret;
1860 }
static int sev_lock_vcpus_for_migration(struct kvm *kvm, enum sev_migration_role role)
Definition: sev.c:1634
static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
Definition: sev.c:1674
@ SEV_MIGRATION_TARGET
Definition: sev.c:1630
@ SEV_MIGRATION_SOURCE
Definition: sev.c:1629
static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
Definition: sev.c:139
static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
Definition: sev.c:133
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
Definition: sev.c:1691
static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
Definition: sev.c:1772
struct misc_cg * misc_cg
Definition: svm.h:91
Here is the call graph for this function:

◆ svm_allocate_nested()

int svm_allocate_nested ( struct vcpu_svm svm)

Definition at line 1177 of file nested.c.

1178 {
1179  struct page *vmcb02_page;
1180 
1181  if (svm->nested.initialized)
1182  return 0;
1183 
1184  vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1185  if (!vmcb02_page)
1186  return -ENOMEM;
1187  svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1188  svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1189 
 1190  svm->nested.msrpm = svm_vcpu_alloc_msrpm();
 1191  if (!svm->nested.msrpm)
1192  goto err_free_vmcb02;
1193  svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1194 
1195  svm->nested.initialized = true;
1196  return 0;
1197 
1198 err_free_vmcb02:
1199  __free_page(vmcb02_page);
1200  return -ENOMEM;
1201 }
unsigned long pa
Definition: svm.h:111
u32 * msrpm
Definition: svm.h:166
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
Definition: svm.c:881
u32 * svm_vcpu_alloc_msrpm(void)
Definition: svm.c:866
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_clr_intercept()

static void svm_clr_intercept ( struct vcpu_svm svm,
int  bit 
)
inlinestatic

Definition at line 432 of file svm.h.

433 {
434  struct vmcb *vmcb = svm->vmcb01.ptr;
435 
436  vmcb_clr_intercept(&vmcb->control, bit);
437 
438  recalc_intercepts(svm);
439 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_complete_interrupt_delivery()

void svm_complete_interrupt_delivery ( struct kvm_vcpu *  vcpu,
int  delivery_mode,
int  trig_mode,
int  vec 
)

Definition at line 3633 of file svm.c.

3635 {
3636  /*
3637  * apic->apicv_active must be read after vcpu->mode.
3638  * Pairs with smp_store_release in vcpu_enter_guest.
3639  */
3640  bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3641 
3642  /* Note, this is called iff the local APIC is in-kernel. */
3643  if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3644  /* Process the interrupt via kvm_check_and_inject_events(). */
3645  kvm_make_request(KVM_REQ_EVENT, vcpu);
3646  kvm_vcpu_kick(vcpu);
3647  return;
3648  }
3649 
3650  trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3651  if (in_guest_mode) {
3652  /*
3653  * Signal the doorbell to tell hardware to inject the IRQ. If
3654  * the vCPU exits the guest before the doorbell chimes, hardware
3655  * will automatically process AVIC interrupts at the next VMRUN.
3656  */
3657  avic_ring_doorbell(vcpu);
3658  } else {
3659  /*
3660  * Wake the vCPU if it was blocking. KVM will then detect the
3661  * pending IRQ when checking if the vCPU has a wake event.
3662  */
3663  kvm_vcpu_wake_up(vcpu);
3664  }
3665 }
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
Definition: avic.c:321
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_copy_lbrs()

void svm_copy_lbrs ( struct vmcb *  to_vmcb,
struct vmcb *  from_vmcb 
)

Definition at line 982 of file svm.c.

983 {
984  to_vmcb->save.dbgctl = from_vmcb->save.dbgctl;
985  to_vmcb->save.br_from = from_vmcb->save.br_from;
986  to_vmcb->save.br_to = from_vmcb->save.br_to;
987  to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from;
988  to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to;
989 
990  vmcb_mark_dirty(to_vmcb, VMCB_LBR);
991 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_copy_vmloadsave_state()

void svm_copy_vmloadsave_state ( struct vmcb *  to_vmcb,
struct vmcb *  from_vmcb 
)

Definition at line 951 of file nested.c.

952 {
953  to_vmcb->save.fs = from_vmcb->save.fs;
954  to_vmcb->save.gs = from_vmcb->save.gs;
955  to_vmcb->save.tr = from_vmcb->save.tr;
956  to_vmcb->save.ldtr = from_vmcb->save.ldtr;
957  to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
958  to_vmcb->save.star = from_vmcb->save.star;
959  to_vmcb->save.lstar = from_vmcb->save.lstar;
960  to_vmcb->save.cstar = from_vmcb->save.cstar;
961  to_vmcb->save.sfmask = from_vmcb->save.sfmask;
962  to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
963  to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
964  to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
965 }
Here is the caller graph for this function:

◆ svm_copy_vmrun_state()

void svm_copy_vmrun_state ( struct vmcb_save_area *  to_save,
struct vmcb_save_area *  from_save 
)

Definition at line 931 of file nested.c.

933 {
934  to_save->es = from_save->es;
935  to_save->cs = from_save->cs;
936  to_save->ss = from_save->ss;
937  to_save->ds = from_save->ds;
938  to_save->gdtr = from_save->gdtr;
939  to_save->idtr = from_save->idtr;
940  to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
941  to_save->efer = from_save->efer;
942  to_save->cr0 = from_save->cr0;
943  to_save->cr3 = from_save->cr3;
944  to_save->cr4 = from_save->cr4;
945  to_save->rax = from_save->rax;
946  to_save->rsp = from_save->rsp;
947  to_save->rip = from_save->rip;
948  to_save->cpl = 0;
949 }
Here is the caller graph for this function:

◆ svm_free_nested()

void svm_free_nested ( struct vcpu_svm svm)

Definition at line 1203 of file nested.c.

1204 {
1205  if (!svm->nested.initialized)
1206  return;
1207 
1208  if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1209  svm_switch_vmcb(svm, &svm->vmcb01);
1210 
 1211  svm_vcpu_free_msrpm(svm->nested.msrpm);
 1212  svm->nested.msrpm = NULL;
1213 
1214  __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1215  svm->nested.vmcb02.ptr = NULL;
1216 
1217  /*
1218  * When last_vmcb12_gpa matches the current vmcb12 gpa,
1219  * some vmcb12 fields are not loaded if they are marked clean
1220  * in the vmcb12, since in this case they are up to date already.
1221  *
1222  * When the vmcb02 is freed, this optimization becomes invalid.
1223  */
1224  svm->nested.last_vmcb12_gpa = INVALID_GPA;
1225 
1226  svm->nested.initialized = false;
1227 }
u64 last_vmcb12_gpa
Definition: svm.h:163
void svm_vcpu_free_msrpm(u32 *msrpm)
Definition: svm.c:915
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_interrupt_blocked()

bool svm_interrupt_blocked ( struct kvm_vcpu *  vcpu)

Definition at line 3767 of file svm.c.

3768 {
3769  struct vcpu_svm *svm = to_svm(vcpu);
3770  struct vmcb *vmcb = svm->vmcb;
3771 
3772  if (!gif_set(svm))
3773  return true;
3774 
3775  if (is_guest_mode(vcpu)) {
3776  /* As long as interrupts are being delivered... */
3777  if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3778  ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3779  : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3780  return true;
3781 
3782  /* ... vmexits aren't blocked by the interrupt shadow */
3783  if (nested_exit_on_intr(svm))
3784  return false;
3785  } else {
3786  if (!svm_get_if_flag(vcpu))
3787  return true;
3788  }
3789 
3790  return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3791 }
static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
Definition: svm.c:1603
static bool gif_set(struct vcpu_svm *svm)
Definition: svm.h:483
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_invoke_exit_handler()

int svm_invoke_exit_handler ( struct kvm_vcpu *  vcpu,
u64  exit_code 
)

Definition at line 3453 of file svm.c.

3454 {
3455  if (!svm_check_exit_valid(exit_code))
3456  return svm_handle_invalid_exit(vcpu, exit_code);
3457 
3458 #ifdef CONFIG_RETPOLINE
3459  if (exit_code == SVM_EXIT_MSR)
3460  return msr_interception(vcpu);
3461  else if (exit_code == SVM_EXIT_VINTR)
3462  return interrupt_window_interception(vcpu);
3463  else if (exit_code == SVM_EXIT_INTR)
3464  return intr_interception(vcpu);
3465  else if (exit_code == SVM_EXIT_HLT)
3466  return kvm_emulate_halt(vcpu);
3467  else if (exit_code == SVM_EXIT_NPF)
3468  return npf_interception(vcpu);
3469 #endif
3470  return svm_exit_handlers[exit_code](vcpu);
3471 }
static int msr_interception(struct kvm_vcpu *vcpu)
Definition: svm.c:3169
static int intr_interception(struct kvm_vcpu *vcpu)
Definition: svm.c:2246
static int npf_interception(struct kvm_vcpu *vcpu)
Definition: svm.c:2051
static int interrupt_window_interception(struct kvm_vcpu *vcpu)
Definition: svm.c:3177
static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
Definition: svm.c:3441
static bool svm_check_exit_valid(u64 exit_code)
Definition: svm.c:3435
static int(*const svm_exit_handlers[])(struct kvm_vcpu *vcpu)
Definition: svm.c:3238
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
Definition: x86.c:9857
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_is_intercept()

static bool svm_is_intercept ( struct vcpu_svm svm,
int  bit 
)
inlinestatic

Definition at line 441 of file svm.h.

442 {
443  return vmcb_is_intercept(&svm->vmcb->control, bit);
444 }
static bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
Definition: svm.h:391
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_leave_nested()

void svm_leave_nested ( struct kvm_vcpu *  vcpu)

Definition at line 1229 of file nested.c.

1230 {
1231  struct vcpu_svm *svm = to_svm(vcpu);
1232 
1233  if (is_guest_mode(vcpu)) {
1234  svm->nested.nested_run_pending = 0;
1235  svm->nested.vmcb12_gpa = INVALID_GPA;
1236 
1237  leave_guest_mode(vcpu);
1238 
1239  svm_switch_vmcb(svm, &svm->vmcb01);
1240 
1241  nested_svm_uninit_mmu_context(vcpu);
1242  vmcb_mark_all_dirty(svm->vmcb);
1243 
1244  if (kvm_apicv_activated(vcpu->kvm))
1245  kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
1246  }
1247 
1248  kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1249 }
static void vmcb_mark_all_dirty(struct vmcb *vmcb)
Definition: svm.h:343
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_msrpm_offset()

u32 svm_msrpm_offset ( u32  msr)

Definition at line 264 of file svm.c.

265 {
266  u32 offset;
267  int i;
268 
269  for (i = 0; i < NUM_MSR_MAPS; i++) {
270  if (msr < msrpm_ranges[i] ||
271  msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
272  continue;
273 
274  offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
275  offset += (i * MSRS_RANGE_SIZE); /* add range offset */
276 
277  /* Now we have the u8 offset - but need the u32 offset */
278  return offset / 4;
279  }
280 
281  /* MSR not in any range */
282  return MSR_INVALID;
283 }
#define MSRS_RANGE_SIZE
Definition: svm.c:261
#define MSRS_IN_RANGE
Definition: svm.c:262
#define NUM_MSR_MAPS
Definition: svm.c:260
static const u32 msrpm_ranges[]
Definition: svm.c:258
#define MSR_INVALID
Definition: svm.h:535
Here is the caller graph for this function:
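
A worked example helps. Assuming the usual constants (not shown on this page) of msrpm_ranges[] = {0, 0xc0000000, 0xc0010000}, MSRS_RANGE_SIZE = 2048 and MSRS_IN_RANGE = 2048 * 8 / 2, the offset for MSR_EFER (0xc0000080) is computed as follows:

    /*
     * msr = 0xc0000080 falls into range 1 (base 0xc0000000):
     *   offset  = (0xc0000080 - 0xc0000000) / 4 = 0x020  (u8 offset, 4 MSRs per byte)
     *   offset += 1 * 2048                      = 0x820  (add range offset)
     *   result  = 0x820 / 4                     = 0x208  (u32 offset == 520)
     */
    u32 off = svm_msrpm_offset(MSR_EFER);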

◆ svm_nmi_blocked()

bool svm_nmi_blocked ( struct kvm_vcpu *  vcpu)

Definition at line 3735 of file svm.c.

3736 {
3737  struct vcpu_svm *svm = to_svm(vcpu);
3738  struct vmcb *vmcb = svm->vmcb;
3739 
3740  if (!gif_set(svm))
3741  return true;
3742 
3743  if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3744  return false;
3745 
3746  if (svm_get_nmi_mask(vcpu))
3747  return true;
3748 
3749  return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3750 }
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
Definition: svm.c:3706
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
Definition: svm.h:586
Here is the call graph for this function:
Here is the caller graph for this function:
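
As with the interrupt case above, a hedged sketch of a typical caller; the wrapper name and return convention are illustrative.

    /* Illustrative only: gate NMI injection on svm_nmi_blocked(). */
    static int example_nmi_allowed(struct kvm_vcpu *vcpu)
    {
            if (to_svm(vcpu)->nested.nested_run_pending)
                    return -EBUSY;

            return !svm_nmi_blocked(vcpu);
    }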

◆ svm_set_cr0()

void svm_set_cr0 ( struct kvm_vcpu *  vcpu,
unsigned long  cr0 
)

Definition at line 1852 of file svm.c.

1853 {
1854  struct vcpu_svm *svm = to_svm(vcpu);
1855  u64 hcr0 = cr0;
1856  bool old_paging = is_paging(vcpu);
1857 
1858 #ifdef CONFIG_X86_64
1859  if (vcpu->arch.efer & EFER_LME) {
1860  if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1861  vcpu->arch.efer |= EFER_LMA;
1862  if (!vcpu->arch.guest_state_protected)
1863  svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1864  }
1865 
1866  if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1867  vcpu->arch.efer &= ~EFER_LMA;
1868  if (!vcpu->arch.guest_state_protected)
1869  svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1870  }
1871  }
1872 #endif
1873  vcpu->arch.cr0 = cr0;
1874 
1875  if (!npt_enabled) {
1876  hcr0 |= X86_CR0_PG | X86_CR0_WP;
1877  if (old_paging != is_paging(vcpu))
1878  vcpu->arch.mmu->cpu_role.ext.cr4_pae = !!(kvm_read_cr4(vcpu) & X86_CR4_PAE);
1879  }
1880 
1881  /*
1882  * re-enable caching here because the QEMU bios
1883  * does not do it - this results in some delay at
1884  * reboot
1885  */
1886  if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1887  hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1888 
1889  svm->vmcb->save.cr0 = hcr0;
1890  vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1891 
1892  /*
1893  * SEV-ES guests must always keep the CR intercepts cleared. CR
1894  * tracking is done using the CR write traps.
1895  */
1896  if (sev_es_guest(vcpu->kvm))
1897  return;
1898 
1899  if (hcr0 == cr0) {
1900  /* Selective CR0 write remains on. */
1901  svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1902  svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1903  } else {
1904  svm_set_intercept(svm, INTERCEPT_CR0_READ);
1905  svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1906  }
1907 }
static ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
static void svm_clr_intercept(struct vcpu_svm *svm, int bit)
Definition: svm.h:432
static void svm_set_intercept(struct vcpu_svm *svm, int bit)
Definition: svm.h:423
static bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
Definition: x86.h:288
Here is the call graph for this function:
Here is the caller graph for this function:
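
The hcr0 != cr0 comparison is what drives the CR0 intercepts; a worked example using the architectural CR0 bit values (an assumption of this note, not shown on this page):

    /*
     * Shadow-paging example (npt_enabled == false):
     *   guest cr0 = 0x00000011                     (PE | ET)
     *   hcr0      = cr0 | X86_CR0_PG | X86_CR0_WP
     *             = 0x00000011 | 0x80000000 | 0x00010000 = 0x80010011
     *   hcr0 != cr0, so INTERCEPT_CR0_READ/WRITE are enabled to keep
     *   the guest view and the hardware view in sync.
     */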

◆ svm_set_cr4()

void svm_set_cr4 ( struct kvm_vcpu *  vcpu,
unsigned long  cr4 
)

Definition at line 1914 of file svm.c.

1915 {
1916  unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1917  unsigned long old_cr4 = vcpu->arch.cr4;
1918 
1919  if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1920  svm_flush_tlb_current(vcpu);
1921 
1922  vcpu->arch.cr4 = cr4;
1923  if (!npt_enabled) {
1924  cr4 |= X86_CR4_PAE;
1925 
1926  if (!is_paging(vcpu))
1927  cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1928  }
1929  cr4 |= host_cr4_mce;
1930  to_svm(vcpu)->vmcb->save.cr4 = cr4;
1931  vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1932 
1933  if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1934  kvm_update_cpuid_runtime(vcpu);
1935 }
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
Definition: cpuid.c:309
static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
Definition: svm.c:3911
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_set_efer()

int svm_set_efer ( struct kvm_vcpu *  vcpu,
u64  efer 
)

Definition at line 296 of file svm.c.

297 {
298  struct vcpu_svm *svm = to_svm(vcpu);
299  u64 old_efer = vcpu->arch.efer;
300  vcpu->arch.efer = efer;
301 
302  if (!npt_enabled) {
303  /* Shadow paging assumes NX to be available. */
304  efer |= EFER_NX;
305 
306  if (!(efer & EFER_LMA))
307  efer &= ~EFER_LME;
308  }
309 
310  if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
311  if (!(efer & EFER_SVME)) {
312  svm_leave_nested(vcpu);
313  svm_set_gif(svm, true);
314  /* #GP intercept is still needed for vmware backdoor */
315  if (!enable_vmware_backdoor)
316  clr_exception_intercept(svm, GP_VECTOR);
317 
318  /*
319  * Free the nested guest state, unless we are in SMM.
320  * In this case we will return to the nested guest
321  * as soon as we leave SMM.
322  */
323  if (!is_smm(vcpu))
324  svm_free_nested(svm);
325 
326  } else {
327  int ret = svm_allocate_nested(svm);
328 
329  if (ret) {
330  vcpu->arch.efer = old_efer;
331  return ret;
332  }
333 
334  /*
335  * Never intercept #GP for SEV guests, KVM can't
336  * decrypt guest memory to workaround the erratum.
337  */
338  if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
339  set_exception_intercept(svm, GP_VECTOR);
340  }
341  }
342 
343  svm->vmcb->save.efer = efer | EFER_SVME;
344  vmcb_mark_dirty(svm->vmcb, VMCB_CR);
345  return 0;
346 }
void svm_free_nested(struct vcpu_svm *svm)
Definition: nested.c:1203
int svm_allocate_nested(struct vcpu_svm *svm)
Definition: nested.c:1177
void svm_leave_nested(struct kvm_vcpu *vcpu)
Definition: nested.c:1229
static bool svm_gp_erratum_intercept
Definition: svm.c:241
static void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
Definition: svm.h:403
bool __read_mostly enable_vmware_backdoor
Definition: x86.c:176
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_set_gif()

void svm_set_gif ( struct vcpu_svm svm,
bool  value 
)

Definition at line 2406 of file svm.c.

2407 {
2408  if (value) {
2409  /*
2410  * If VGIF is enabled, the STGI intercept is only added to
2411  * detect the opening of the SMI/NMI window; remove it now.
2412  * Likewise, clear the VINTR intercept, we will set it
2413  * again while processing KVM_REQ_EVENT if needed.
2414  */
2415  if (vgif)
2416  svm_clr_intercept(svm, INTERCEPT_STGI);
2417  if (svm_is_intercept(svm, INTERCEPT_VINTR))
2418  svm_clear_vintr(svm);
2419 
2420  enable_gif(svm);
2421  if (svm->vcpu.arch.smi_pending ||
2422  svm->vcpu.arch.nmi_pending ||
2423  kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2424  kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2425  kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2426  } else {
2427  disable_gif(svm);
2428 
2429  /*
2430  * After a CLGI no interrupts should come. But if vGIF is
2431  * in use, we still rely on the VINTR intercept (rather than
2432  * STGI) to detect an open interrupt window.
2433  */
2434  if (!vgif)
2435  svm_clear_vintr(svm);
2436  }
2437 }
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
Definition: irq.c:82
static bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
Definition: lapic.h:231
int vgif
Definition: svm.c:214
static void svm_clear_vintr(struct vcpu_svm *svm)
Definition: svm.c:1663
static void disable_gif(struct vcpu_svm *svm)
Definition: svm.h:473
static void enable_gif(struct vcpu_svm *svm)
Definition: svm.h:463
Here is the call graph for this function:
Here is the caller graph for this function:
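
For orientation, the STGI/CLGI intercept handlers are the natural callers; a minimal sketch of that pattern (the handler name and the instruction-skip step are assumptions based on common KVM structure):

    /* Sketch: emulate STGI by opening the global interrupt flag. */
    static int example_stgi_interception(struct kvm_vcpu *vcpu)
    {
            int ret = kvm_skip_emulated_instruction(vcpu);

            svm_set_gif(to_svm(vcpu), true);
            return ret;
    }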

◆ svm_set_intercept()

static void svm_set_intercept ( struct vcpu_svm svm,
int  bit 
)
inlinestatic

Definition at line 423 of file svm.h.

424 {
425  struct vmcb *vmcb = svm->vmcb01.ptr;
426 
427  vmcb_set_intercept(&vmcb->control, bit);
428 
429  recalc_intercepts(svm);
430 }
Here is the call graph for this function:
Here is the caller graph for this function:
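
Usage example, taken from svm_set_cr0() above: intercepts are always set in vmcb01, and recalc_intercepts() propagates the change to the nested VMCB when one is in use.

    svm_set_intercept(svm, INTERCEPT_CR0_READ);
    svm_set_intercept(svm, INTERCEPT_CR0_WRITE);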

◆ svm_set_x2apic_msr_interception()

void svm_set_x2apic_msr_interception ( struct vcpu_svm svm,
bool  intercept 
)

Definition at line 892 of file svm.c.

893 {
894  int i;
895 
896  if (intercept == svm->x2avic_msrs_intercepted)
897  return;
898 
899  if (!x2avic_enabled)
900  return;
901 
902  for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
903  int index = direct_access_msrs[i].index;
904 
905  if ((index < APIC_BASE_MSR) ||
906  (index > APIC_BASE_MSR + 0xff))
907  continue;
908  set_msr_interception(&svm->vcpu, svm->msrpm, index,
909  !intercept, !intercept);
910  }
911 
912  svm->x2avic_msrs_intercepted = intercept;
913 }
u32 * msrpm
Definition: svm.h:234
bool x2avic_msrs_intercepted
Definition: svm.h:291
static const struct svm_direct_access_msrs direct_access_msrs[MAX_DIRECT_ACCESS_MSRS]
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
Definition: svm.c:859
#define MAX_DIRECT_ACCESS_MSRS
Definition: svm.h:33
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_smi_blocked()

bool svm_smi_blocked ( struct kvm_vcpu *  vcpu)
Here is the caller graph for this function:

◆ svm_switch_vmcb()

void svm_switch_vmcb ( struct vcpu_svm svm,
struct kvm_vmcb_info target_vmcb 
)

Definition at line 1407 of file svm.c.

1408 {
1409  svm->current_vmcb = target_vmcb;
1410  svm->vmcb = target_vmcb->ptr;
1411 }
struct kvm_vmcb_info * current_vmcb
Definition: svm.h:214
Here is the caller graph for this function:
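
Usage example, condensed from svm_leave_nested() above: after switching back to vmcb01, the new current VMCB is marked fully dirty so stale clean bits are not trusted.

    svm_switch_vmcb(svm, &svm->vmcb01);
    vmcb_mark_all_dirty(svm->vmcb);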

◆ svm_update_lbrv()

void svm_update_lbrv ( struct kvm_vcpu *  vcpu)

Definition at line 1037 of file svm.c.

1038 {
1039  struct vcpu_svm *svm = to_svm(vcpu);
1040  bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
1041  bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
1042  (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1043  (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
1044 
1045  if (enable_lbrv == current_enable_lbrv)
1046  return;
1047 
1048  if (enable_lbrv)
1049  svm_enable_lbrv(vcpu);
1050  else
1051  svm_disable_lbrv(vcpu);
1052 }
static struct vmcb * svm_get_lbr_vmcb(struct vcpu_svm *svm)
Definition: svm.c:1026
static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
Definition: svm.c:1008
static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
Definition: svm.c:993
Here is the call graph for this function:
Here is the caller graph for this function:

◆ svm_vcpu_alloc_msrpm()

u32* svm_vcpu_alloc_msrpm ( void  )

Definition at line 866 of file svm.c.

867 {
868  unsigned int order = get_order(MSRPM_SIZE);
869  struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
870  u32 *msrpm;
871 
872  if (!pages)
873  return NULL;
874 
875  msrpm = page_address(pages);
876  memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
877 
878  return msrpm;
879 }
#define MSRPM_SIZE
Definition: svm.h:31
Here is the caller graph for this function:

◆ svm_vcpu_free_msrpm()

void svm_vcpu_free_msrpm ( u32 *  msrpm)

Definition at line 915 of file svm.c.

916 {
917  __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
918 }
Here is the caller graph for this function:

◆ svm_vcpu_init_msrpm()

void svm_vcpu_init_msrpm ( struct kvm_vcpu *  vcpu,
u32 *  msrpm 
)

Definition at line 881 of file svm.c.

882 {
883  int i;
884 
885  for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
886  if (!direct_access_msrs[i].always)
887  continue;
888  set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
889  }
890 }
Here is the call graph for this function:
Here is the caller graph for this function:
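
Putting the three MSR-permission-map helpers together, a hedged sketch of the per-vCPU lifecycle; the wrapper functions are illustrative only.

    /* Illustrative lifecycle of a per-vCPU MSR permission map. */
    static int example_msrpm_setup(struct vcpu_svm *svm)
    {
            /* Allocate an all-ones bitmap: every MSR access is intercepted. */
            svm->msrpm = svm_vcpu_alloc_msrpm();
            if (!svm->msrpm)
                    return -ENOMEM;

            /* Open up direct access for the "always" MSRs. */
            svm_vcpu_init_msrpm(&svm->vcpu, svm->msrpm);
            return 0;
    }

    static void example_msrpm_teardown(struct vcpu_svm *svm)
    {
            svm_vcpu_free_msrpm(svm->msrpm);
            svm->msrpm = NULL;
    }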

◆ svm_write_tsc_multiplier()

void svm_write_tsc_multiplier ( struct kvm_vcpu *  vcpu)

Definition at line 1163 of file svm.c.

1164 {
1165  preempt_disable();
1166  if (to_svm(vcpu)->guest_state_loaded)
1167  __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1168  preempt_enable();
1169 }
static void __svm_write_tsc_multiplier(u64 multiplier)
Definition: svm.c:563
Here is the call graph for this function:
Here is the caller graph for this function:

◆ to_kvm_svm()

static __always_inline struct kvm_svm* to_kvm_svm ( struct kvm *  kvm)
static

Definition at line 316 of file svm.h.

317 {
318  return container_of(kvm, struct kvm_svm, kvm);
319 }
Here is the caller graph for this function:

◆ to_svm()

static __always_inline struct vcpu_svm* to_svm ( struct kvm_vcpu *  vcpu)
static

Definition at line 364 of file svm.h.

365 {
366  return container_of(vcpu, struct vcpu_svm, vcpu);
367 }

◆ vmcb12_is_intercept()

static bool vmcb12_is_intercept ( struct vmcb_ctrl_area_cached control,
u32  bit 
)
inlinestatic

Definition at line 397 of file svm.h.

398 {
399  WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
400  return test_bit(bit, (unsigned long *)&control->intercepts);
401 }
Here is the caller graph for this function:

◆ vmcb_clr_intercept()

static void vmcb_clr_intercept ( struct vmcb_control_area *  control,
u32  bit 
)
inlinestatic

Definition at line 385 of file svm.h.

386 {
387  WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
388  __clear_bit(bit, (unsigned long *)&control->intercepts);
389 }
Here is the caller graph for this function:

◆ vmcb_is_dirty()

static bool vmcb_is_dirty ( struct vmcb *  vmcb,
int  bit 
)
inlinestatic

Definition at line 359 of file svm.h.

360 {
361  return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
362 }
Here is the caller graph for this function:

◆ vmcb_is_intercept()

static bool vmcb_is_intercept ( struct vmcb_control_area *  control,
u32  bit 
)
inlinestatic

Definition at line 391 of file svm.h.

392 {
393  WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
394  return test_bit(bit, (unsigned long *)&control->intercepts);
395 }
Here is the caller graph for this function:

◆ vmcb_mark_all_clean()

static void vmcb_mark_all_clean ( struct vmcb *  vmcb)
inlinestatic

Definition at line 348 of file svm.h.

349 {
350  vmcb->control.clean = VMCB_ALL_CLEAN_MASK
351  & ~VMCB_ALWAYS_DIRTY_MASK;
352 }
#define VMCB_ALL_CLEAN_MASK
Definition: svm.h:68
#define VMCB_ALWAYS_DIRTY_MASK
Definition: svm.h:77
Here is the caller graph for this function:

◆ vmcb_mark_all_dirty()

static void vmcb_mark_all_dirty ( struct vmcb *  vmcb)
inlinestatic

Definition at line 343 of file svm.h.

344 {
345  vmcb->control.clean = 0;
346 }
Here is the caller graph for this function:

◆ vmcb_mark_dirty()

static void vmcb_mark_dirty ( struct vmcb *  vmcb,
int  bit 
)
inlinestatic

Definition at line 354 of file svm.h.

355 {
356  vmcb->control.clean &= ~(1 << bit);
357 }
Here is the caller graph for this function:
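
Usage example, taken from svm_set_cr0() above: after software modifies a VMCB field, the matching clean bit is cleared so hardware reloads that state on the next VMRUN.

    svm->vmcb->save.cr0 = hcr0;
    vmcb_mark_dirty(svm->vmcb, VMCB_CR);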

◆ vmcb_set_intercept()

static void vmcb_set_intercept ( struct vmcb_control_area *  control,
u32  bit 
)
inlinestatic

Definition at line 379 of file svm.h.

380 {
381  WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
382  __set_bit(bit, (unsigned long *)&control->intercepts);
383 }
Here is the caller graph for this function:

Variable Documentation

◆ msrpm_offsets

u32 msrpm_offsets [MSRPM_OFFSETS] __read_mostly
extern
 
Definition in file svm.c.

◆ dump_invalid_vmcb

bool dump_invalid_vmcb
extern

Definition at line 231 of file svm.c.

◆ intercept_smi

bool intercept_smi
extern

Definition at line 235 of file svm.c.

◆ max_sev_asid

unsigned int max_sev_asid
extern

Definition at line 71 of file sev.c.

◆ npt_enabled

bool npt_enabled
extern

Definition at line 198 of file svm.c.

◆ nrips

int nrips
extern

Definition at line 206 of file svm.c.

◆ svm_nested_ops

struct kvm_x86_nested_ops svm_nested_ops
extern

Definition at line 1780 of file nested.c.

◆ vgif

int vgif
extern

Definition at line 214 of file svm.c.

◆ vnmi

bool vnmi
extern

Definition at line 238 of file svm.c.

◆ x2avic_enabled

bool x2avic_enabled
extern

Definition at line 75 of file avic.c.