18 #include <linux/kvm_types.h>
19 #include <linux/kvm_host.h>
20 #include <linux/bits.h>
23 #include <asm/sev-common.h>
/*
 * Physical address of a struct page with the SME C-bit applied, suitable
 * for programming into VMCB/nested-paging fields.
 * Fix: dropped the stray "28 " line-number residue that made the
 * directive invalid C.
 */
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
/*
 * Sizes of the SVM I/O permission map (3 pages) and MSR permission map
 * (2 pages).
 * Fixes: dropped the stray "30 "/"31 " line-number residue, and
 * parenthesized the expansions — an unparenthesized `PAGE_SIZE * 3`
 * binds wrongly in expressions such as `x / IOPM_SIZE`.
 */
#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)
/*
 * NOTE(review): presumably the number of MSRs given direct (non-intercepted)
 * guest access, and the number of tracked MSR-permission-map offsets — confirm
 * against the msrpm setup code.
 * Fix: dropped the stray "33 "/"34 " line-number residue that made the
 * directives invalid C.
 */
#define MAX_DIRECT_ACCESS_MSRS 47
#define MSRPM_OFFSETS 32
68 #define VMCB_ALL_CLEAN_MASK ( \
69 (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \
70 (1U << VMCB_ASID) | (1U << VMCB_INTR) | \
71 (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
72 (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
73 (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
/*
 * Clean bits that must be treated as dirty on every run: the VMCB_INTR and
 * VMCB_CR2 fields are never allowed to be cached.
 * Fix: dropped the stray "77 " line-number residue that made the directive
 * invalid C.
 */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
102 struct hlist_node
hnode;
151 #if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
152 struct hv_vmcb_enlightenments hv_enlightenments;
210 struct kvm_vcpu
vcpu;
323 #ifdef CONFIG_KVM_AMD_SEV
334 #ifdef CONFIG_KVM_AMD_SEV
345 vmcb->control.clean = 0;
356 vmcb->control.clean &= ~(1 << bit);
361 return !test_bit(bit, (
unsigned long *)&vmcb->control.clean);
/*
 * Register set that may be loaded lazily on SVM: only the PDPTRs.
 * Fix: dropped the stray "377 " line-number residue that made the
 * directive invalid C.
 */
#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)
381 WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
382 __set_bit(bit, (
unsigned long *)&
control->intercepts);
387 WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
388 __clear_bit(bit, (
unsigned long *)&
control->intercepts);
393 WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
394 return test_bit(bit, (
unsigned long *)&
control->intercepts);
399 WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
400 return test_bit(bit, (
unsigned long *)&
control->intercepts);
407 WARN_ON_ONCE(bit >= 32);
417 WARN_ON_ONCE(bit >= 32);
468 vmcb->control.int_ctl |= V_GIF_MASK;
478 vmcb->control.int_ctl &= ~V_GIF_MASK;
488 return !!(vmcb->control.int_ctl & V_GIF_MASK);
507 u32 msr = offset * 16;
509 return (msr >= APIC_BASE_MSR) &&
510 (msr < (APIC_BASE_MSR + 0x100));
529 return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
/* Sentinel for "no such MSR slot" lookups. */
#define MSR_INVALID 0xffffffffU

/*
 * All DEBUGCTL bits above bit 5 are treated as reserved here.
 * Fix (both macros): dropped the stray "535 "/"537 " line-number residue
 * that made the directives invalid C.
 */
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
545 void svm_copy_lbrs(
struct vmcb *to_vmcb,
struct vmcb *from_vmcb);
549 void svm_set_cr0(
struct kvm_vcpu *vcpu,
unsigned long cr0);
550 void svm_set_cr4(
struct kvm_vcpu *vcpu,
unsigned long cr4);
558 int read,
int write);
561 int trig_mode,
int vec);
/*
 * Return codes for nested-exit dispatch.
 * Fix: dropped the stray "565 "-"567 " line-number residue that made the
 * directives invalid C.
 */
#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
592 u64 vmcb_gpa,
struct vmcb *vmcb12,
bool from_vmrun);
598 struct vmcb_save_area *from_save);
604 svm->
vmcb->control.exit_code = exit_code;
605 svm->
vmcb->control.exit_info_1 = 0;
606 svm->
vmcb->control.exit_info_2 = 0;
613 bool has_error_code, u32 error_code);
618 struct vmcb_control_area *
control);
620 struct vmcb_save_area *save);
628 #define AVIC_REQUIRED_APICV_INHIBITS \
630 BIT(APICV_INHIBIT_REASON_DISABLE) | \
631 BIT(APICV_INHIBIT_REASON_ABSENT) | \
632 BIT(APICV_INHIBIT_REASON_HYPERV) | \
633 BIT(APICV_INHIBIT_REASON_NESTED) | \
634 BIT(APICV_INHIBIT_REASON_IRQWIN) | \
635 BIT(APICV_INHIBIT_REASON_PIT_REINJ) | \
636 BIT(APICV_INHIBIT_REASON_BLOCKIRQ) | \
637 BIT(APICV_INHIBIT_REASON_SEV) | \
638 BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) | \
639 BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) | \
640 BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) | \
641 BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED) \
657 uint32_t guest_irq,
bool set);
/*
 * Supported GHCB protocol version range (only version 1 here).
 * Fix: dropped the stray "667 "/"668 " line-number residue that made the
 * directives invalid C.
 */
#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL
676 struct kvm_enc_region *range);
678 struct kvm_enc_region *range);
703 #define DEFINE_KVM_GHCB_ACCESSORS(field) \
704 static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
706 return test_bit(GHCB_BITMAP_IDX(field), \
707 (unsigned long *)&svm->sev_es.valid_bitmap); \
710 static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
712 return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
static bool is_guest_mode(struct kvm_vcpu *vcpu)
unsigned long pages_locked
struct kvm * enc_context_owner
atomic_t migration_in_progress
struct list_head mirror_entry
struct list_head regions_list
struct list_head mirror_vms
struct kvm_sev_info sev_info
struct page * avic_logical_id_table_page
struct page * avic_physical_id_table_page
struct vmcb * current_vmcb
unsigned long save_area_pa
struct vmcb_ctrl_area_cached ctl
struct vmcb_save_area_cached save
struct kvm_vmcb_info vmcb02
bool force_msr_bitmap_recalc
struct sev_es_save_area * vmsa
struct kvm_host_map ghcb_map
struct kvm_vmcb_info * current_vmcb
struct page * avic_backing_page
unsigned long soft_int_old_rip
unsigned long soft_int_next_rip
unsigned long soft_int_csbase
u64 nmi_singlestep_guest_rflags
struct svm_nested_state nested
struct vcpu_svm::@33 shadow_msr_intercept
struct vcpu_sev_es_state sev_es
u64 * avic_physical_id_cache
bool awaiting_iret_completion
struct kvm_vmcb_info vmcb01
bool x2avic_msrs_intercepted
u32 intercepts[MAX_INTERCEPT]
static bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
struct kvm_x86_nested_ops svm_nested_ops
u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
void sev_hardware_unsetup(void)
int avic_vm_init(struct kvm *kvm)
static void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
static bool nested_exit_on_intr(struct vcpu_svm *svm)
static __always_inline bool sev_es_guest(struct kvm *kvm)
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
#define VMCB_ALL_CLEAN_MASK
u32 svm_msrpm_offset(u32 msr)
static __always_inline bool sev_guest(struct kvm *kvm)
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
unsigned int max_sev_asid
static bool svm_is_intercept(struct vcpu_svm *svm, int bit)
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set)
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
void avic_vm_destroy(struct kvm *kvm)
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
void avic_vcpu_put(struct kvm_vcpu *vcpu)
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
static struct vmcb * get_vnmi_vmcb_l1(struct vcpu_svm *svm)
static void svm_clr_intercept(struct vcpu_svm *svm, int bit)
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
static void vmcb_mark_all_dirty(struct vmcb *vmcb)
void svm_set_gif(struct vcpu_svm *svm, bool value)
#define MAX_DIRECT_ACCESS_MSRS
static bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
static bool nested_exit_on_smi(struct vcpu_svm *svm)
static void vmcb_mark_all_clean(struct vmcb *vmcb)
static bool is_vnmi_enabled(struct vcpu_svm *svm)
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
void svm_vcpu_free_msrpm(u32 *msrpm)
void svm_free_nested(struct vcpu_svm *svm)
static struct vmcb * get_vgif_vmcb(struct vcpu_svm *svm)
static bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
static void disable_gif(struct vcpu_svm *svm)
int nested_svm_vmexit(struct vcpu_svm *svm)
int nested_svm_exit_handled(struct vcpu_svm *svm)
static bool is_x2apic_msrpm_offset(u32 offset)
static int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
DECLARE_PER_CPU(struct svm_cpu_data, svm_data)
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
void disable_nmi_singlestep(struct vcpu_svm *svm)
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
static __always_inline struct vcpu_svm * to_svm(struct kvm_vcpu *vcpu)
static void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code)
int sev_cpu_init(struct svm_cpu_data *sd)
bool svm_smi_blocked(struct kvm_vcpu *vcpu)
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
int avic_init_vcpu(struct vcpu_svm *svm)
int sev_mem_enc_unregister_region(struct kvm *kvm, struct kvm_enc_region *range)
static bool nested_npt_enabled(struct vcpu_svm *svm)
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
#define VMCB_ALWAYS_DIRTY_MASK
void __init sev_hardware_setup(void)
void sev_es_vcpu_reset(struct vcpu_svm *svm)
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
int svm_allocate_nested(struct vcpu_svm *svm)
u32 * svm_vcpu_alloc_msrpm(void)
int sev_mem_enc_register_region(struct kvm *kvm, struct kvm_enc_region *range)
static bool nested_vnmi_enabled(struct vcpu_svm *svm)
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
void sev_init_vmcb(struct vcpu_svm *svm)
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted)
int nested_svm_exit_special(struct vcpu_svm *svm)
static __always_inline struct kvm_svm * to_kvm_svm(struct kvm *kvm)
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
static void enable_gif(struct vcpu_svm *svm)
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
void svm_update_lbrv(struct kvm_vcpu *vcpu)
static bool nested_vgif_enabled(struct vcpu_svm *svm)
#define DEFINE_KVM_GHCB_ACCESSORS(field)
void sev_guest_memory_reclaimed(struct kvm *kvm)
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
bool avic_hardware_setup(void)
void recalc_intercepts(struct vcpu_svm *svm)
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control)
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable)
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, struct vmcb_save_area *save)
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write)
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static bool gif_set(struct vcpu_svm *svm)
void __init sev_set_cpu_caps(void)
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
static bool nested_exit_on_nmi(struct vcpu_svm *svm)
void svm_copy_vmrun_state(struct vmcb_save_area *to_save, struct vmcb_save_area *from_save)
static void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
static bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
void sev_vm_destroy(struct kvm *kvm)
void sev_free_vcpu(struct kvm_vcpu *vcpu)
static void svm_set_intercept(struct vcpu_svm *svm, int bit)
int avic_ga_log_notifier(u32 ga_tag)
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun)
static void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
void svm_leave_nested(struct kvm_vcpu *vcpu)
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode, int trig_mode, int vec)
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS)