KVM
Macros | Functions | Variables
arm.c File Reference
#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>
#include "trace_arm.h"
#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

Macros

#define CREATE_TRACE_POINTS
 
#define init_psci_0_1_impl_state(config, what)    config.psci_0_1_ ## what ## _implemented = psci_ops.what
 

Functions

 DECLARE_KVM_HYP_PER_CPU (unsigned long, kvm_hyp_vector)
 
 DEFINE_PER_CPU (unsigned long, kvm_arm_hyp_stack_page)
 
 DECLARE_KVM_NVHE_PER_CPU (struct kvm_nvhe_init_params, kvm_init_params)
 
 DECLARE_KVM_NVHE_PER_CPU (struct kvm_cpu_context, kvm_hyp_ctxt)
 
static DEFINE_PER_CPU (unsigned char, kvm_hyp_initialized)
 
 DEFINE_STATIC_KEY_FALSE (userspace_irqchip_in_use)
 
bool is_kvm_arm_initialised (void)
 
int kvm_arch_vcpu_should_kick (struct kvm_vcpu *vcpu)
 
int kvm_vm_ioctl_enable_cap (struct kvm *kvm, struct kvm_enable_cap *cap)
 
static int kvm_arm_default_max_vcpus (void)
 
int kvm_arch_init_vm (struct kvm *kvm, unsigned long type)
 
vm_fault_t kvm_arch_vcpu_fault (struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 
void kvm_arch_destroy_vm (struct kvm *kvm)
 
int kvm_vm_ioctl_check_extension (struct kvm *kvm, long ext)
 
long kvm_arch_dev_ioctl (struct file *filp, unsigned int ioctl, unsigned long arg)
 
struct kvm * kvm_arch_alloc_vm (void)
 
int kvm_arch_vcpu_precreate (struct kvm *kvm, unsigned int id)
 
int kvm_arch_vcpu_create (struct kvm_vcpu *vcpu)
 
void kvm_arch_vcpu_postcreate (struct kvm_vcpu *vcpu)
 
void kvm_arch_vcpu_destroy (struct kvm_vcpu *vcpu)
 
void kvm_arch_vcpu_blocking (struct kvm_vcpu *vcpu)
 
void kvm_arch_vcpu_unblocking (struct kvm_vcpu *vcpu)
 
void kvm_arch_vcpu_load (struct kvm_vcpu *vcpu, int cpu)
 
void kvm_arch_vcpu_put (struct kvm_vcpu *vcpu)
 
static void __kvm_arm_vcpu_power_off (struct kvm_vcpu *vcpu)
 
void kvm_arm_vcpu_power_off (struct kvm_vcpu *vcpu)
 
bool kvm_arm_vcpu_stopped (struct kvm_vcpu *vcpu)
 
static void kvm_arm_vcpu_suspend (struct kvm_vcpu *vcpu)
 
static bool kvm_arm_vcpu_suspended (struct kvm_vcpu *vcpu)
 
int kvm_arch_vcpu_ioctl_get_mpstate (struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state)
 
int kvm_arch_vcpu_ioctl_set_mpstate (struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state)
 
int kvm_arch_vcpu_runnable (struct kvm_vcpu *v)
 
bool kvm_arch_vcpu_in_kernel (struct kvm_vcpu *vcpu)
 
static int kvm_vcpu_initialized (struct kvm_vcpu *vcpu)
 
static void kvm_init_mpidr_data (struct kvm *kvm)
 
int kvm_arch_vcpu_run_pid_change (struct kvm_vcpu *vcpu)
 
bool kvm_arch_intc_initialized (struct kvm *kvm)
 
void kvm_arm_halt_guest (struct kvm *kvm)
 
void kvm_arm_resume_guest (struct kvm *kvm)
 
static void kvm_vcpu_sleep (struct kvm_vcpu *vcpu)
 
void kvm_vcpu_wfi (struct kvm_vcpu *vcpu)
 
static int kvm_vcpu_suspend (struct kvm_vcpu *vcpu)
 
static int check_vcpu_requests (struct kvm_vcpu *vcpu)
 
static bool vcpu_mode_is_bad_32bit (struct kvm_vcpu *vcpu)
 
static bool kvm_vcpu_exit_request (struct kvm_vcpu *vcpu, int *ret)
 
static int noinstr kvm_arm_vcpu_enter_exit (struct kvm_vcpu *vcpu)
 
int kvm_arch_vcpu_ioctl_run (struct kvm_vcpu *vcpu)
 
static int vcpu_interrupt_line (struct kvm_vcpu *vcpu, int number, bool level)
 
int kvm_vm_ioctl_irq_line (struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status)
 
static unsigned long system_supported_vcpu_features (void)
 
static int kvm_vcpu_init_check_features (struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
 
static bool kvm_vcpu_init_changed (struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
 
static int kvm_setup_vcpu (struct kvm_vcpu *vcpu)
 
static int __kvm_vcpu_set_target (struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
 
static int kvm_vcpu_set_target (struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
 
static int kvm_arch_vcpu_ioctl_vcpu_init (struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
 
static int kvm_arm_vcpu_set_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
static int kvm_arm_vcpu_get_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
static int kvm_arm_vcpu_has_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
static int kvm_arm_vcpu_get_events (struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
 
static int kvm_arm_vcpu_set_events (struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
 
long kvm_arch_vcpu_ioctl (struct file *filp, unsigned int ioctl, unsigned long arg)
 
void kvm_arch_sync_dirty_log (struct kvm *kvm, struct kvm_memory_slot *memslot)
 
static int kvm_vm_ioctl_set_device_addr (struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
 
static int kvm_vm_has_attr (struct kvm *kvm, struct kvm_device_attr *attr)
 
static int kvm_vm_set_attr (struct kvm *kvm, struct kvm_device_attr *attr)
 
int kvm_arch_vm_ioctl (struct file *filp, unsigned int ioctl, unsigned long arg)
 
static void unlock_vcpus (struct kvm *kvm, int vcpu_lock_idx)
 
void unlock_all_vcpus (struct kvm *kvm)
 
bool lock_all_vcpus (struct kvm *kvm)
 
static unsigned long nvhe_percpu_size (void)
 
static unsigned long nvhe_percpu_order (void)
 
static void kvm_init_vector_slot (void *base, enum arm64_hyp_spectre_vector slot)
 
static int kvm_init_vector_slots (void)
 
static void __init cpu_prepare_hyp_mode (int cpu, u32 hyp_va_bits)
 
static void hyp_install_host_vector (void)
 
static void cpu_init_hyp_mode (void)
 
static void cpu_hyp_reset (void)
 
static void cpu_set_hyp_vector (void)
 
static void cpu_hyp_init_context (void)
 
static void cpu_hyp_init_features (void)
 
static void cpu_hyp_reinit (void)
 
static void cpu_hyp_init (void *discard)
 
static void cpu_hyp_uninit (void *discard)
 
int kvm_arch_hardware_enable (void)
 
void kvm_arch_hardware_disable (void)
 
static void __init hyp_cpu_pm_init (void)
 
static void __init hyp_cpu_pm_exit (void)
 
static void __init init_cpu_logical_map (void)
 
static bool __init init_psci_relay (void)
 
static int __init init_subsystems (void)
 
static void __init teardown_subsystems (void)
 
static void __init teardown_hyp_mode (void)
 
static int __init do_pkvm_init (u32 hyp_va_bits)
 
static u64 get_hyp_id_aa64pfr0_el1 (void)
 
static void kvm_hyp_init_symbols (void)
 
static int __init kvm_hyp_init_protection (u32 hyp_va_bits)
 
static void pkvm_hyp_init_ptrauth (void)
 
static int __init init_hyp_mode (void)
 
struct kvm_vcpu * kvm_mpidr_to_vcpu (struct kvm *kvm, unsigned long mpidr)
 
bool kvm_arch_irqchip_in_kernel (struct kvm *kvm)
 
bool kvm_arch_has_irq_bypass (void)
 
int kvm_arch_irq_bypass_add_producer (struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod)
 
void kvm_arch_irq_bypass_del_producer (struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod)
 
void kvm_arch_irq_bypass_stop (struct irq_bypass_consumer *cons)
 
void kvm_arch_irq_bypass_start (struct irq_bypass_consumer *cons)
 
static __init int kvm_arm_init (void)
 
static int __init early_kvm_mode_cfg (char *arg)
 
 early_param ("kvm-arm.mode", early_kvm_mode_cfg)
 
enum kvm_mode kvm_get_mode (void)
 
 module_init (kvm_arm_init)
 

Variables

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT
 
static bool vgic_present
 
static bool kvm_arm_initialised
 
static void * hyp_spectre_vector_selector [BP_HARDEN_EL2_SLOTS]
 

Macro Definition Documentation

◆ CREATE_TRACE_POINTS

#define CREATE_TRACE_POINTS

Definition at line 26 of file arm.c.

◆ init_psci_0_1_impl_state

#define init_psci_0_1_impl_state (   config,
  what 
)     config.psci_0_1_ ## what ## _implemented = psci_ops.what

Definition at line 2104 of file arm.c.
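
The ## token-pasting builds both the flag name and the psci_ops member from the what argument. As a sketch of the preprocessor output, taking one of the invocations from init_psci_relay() below:

init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
/* expands to: */
kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on;

The psci_0_1_*_implemented fields are booleans, so each assignment records whether the corresponding PSCI v0.1 callback is non-NULL.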

Function Documentation

◆ __kvm_arm_vcpu_power_off()

static void __kvm_arm_vcpu_power_off ( struct kvm_vcpu *  vcpu)
static

Definition at line 487 of file arm.c.

488 {
489  WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
490  kvm_make_request(KVM_REQ_SLEEP, vcpu);
491  kvm_vcpu_kick(vcpu);
492 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
Here is the call graph for this function:
Here is the caller graph for this function:

◆ __kvm_vcpu_set_target()

static int __kvm_vcpu_set_target ( struct kvm_vcpu *  vcpu,
const struct kvm_vcpu_init init 
)
static

Definition at line 1341 of file arm.c.

1343 {
1344  unsigned long features = init->features[0];
1345  struct kvm *kvm = vcpu->kvm;
1346  int ret = -EINVAL;
1347 
1348  mutex_lock(&kvm->arch.config_lock);
1349 
1350  if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) &&
1351  kvm_vcpu_init_changed(vcpu, init))
1352  goto out_unlock;
1353 
1354  bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
1355 
1356  ret = kvm_setup_vcpu(vcpu);
1357  if (ret)
1358  goto out_unlock;
1359 
1360  /* Now we know what it is, we can reset it. */
1361  kvm_reset_vcpu(vcpu);
1362 
1363  set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
1364  vcpu_set_flag(vcpu, VCPU_INITIALIZED);
1365  ret = 0;
1366 out_unlock:
1367  mutex_unlock(&kvm->arch.config_lock);
1368  return ret;
1369 }
static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
Definition: arm.c:1326
static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
Definition: arm.c:1317
void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
Definition: reset.c:191
Here is the call graph for this function:
Here is the caller graph for this function:

◆ check_vcpu_requests()

static int check_vcpu_requests ( struct kvm_vcpu *  vcpu)
static

check_vcpu_requests - check and handle pending vCPU requests
@vcpu: the VCPU pointer

Return: 1 if we should enter the guest,
0 if we should exit to userspace, or
< 0 if we should exit to userspace, where the return value indicates an error

Definition at line 838 of file arm.c.

839 {
840  if (kvm_request_pending(vcpu)) {
841  if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
842  kvm_vcpu_sleep(vcpu);
843 
844  if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
845  kvm_reset_vcpu(vcpu);
846 
847  /*
848  * Clear IRQ_PENDING requests that were made to guarantee
849  * that a VCPU sees new virtual interrupts.
850  */
851  kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
852 
853  if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
854  kvm_update_stolen_time(vcpu);
855 
856  if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
857  /* The distributor enable bits were changed */
858  preempt_disable();
859  vgic_v4_put(vcpu);
860  vgic_v4_load(vcpu);
861  preempt_enable();
862  }
863 
864  if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
865  kvm_vcpu_reload_pmu(vcpu);
866 
867  if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
868  kvm_vcpu_pmu_restore_guest(vcpu);
869 
870  if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
871  return kvm_vcpu_suspend(vcpu);
872 
873  if (kvm_dirty_ring_check_request(vcpu))
874  return 0;
875  }
876 
877  return 1;
878 }
static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
Definition: arm.c:796
static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
Definition: arm.c:740
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
Definition: dirty_ring.c:194
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:805
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
Definition: pmu.c:176
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
Definition: pvtime.c:13
int vgic_v4_load(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:349
int vgic_v4_put(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:339
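
The requests consumed here follow a producer/consumer pattern: another context raises a request bit and kicks the vCPU, and this function consumes the bit on the next guest entry. A minimal sketch of the two halves, using only calls that already appear in this file:

/* Producer (cf. __kvm_arm_vcpu_power_off() above): raise the bit, then
 * force the vCPU out of the guest so the request is noticed. */
kvm_make_request(KVM_REQ_SLEEP, vcpu);
kvm_vcpu_kick(vcpu);

/* Consumer (this function, run before each guest entry): test-and-clear
 * the bit, then act on it. */
if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
  kvm_vcpu_sleep(vcpu);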

◆ cpu_hyp_init()

static void cpu_hyp_init ( void *  discard)
static

Definition at line 1988 of file arm.c.

1989 {
1990  if (!__this_cpu_read(kvm_hyp_initialized)) {
1991  cpu_hyp_reinit();
1992  __this_cpu_write(kvm_hyp_initialized, 1);
1993  }
1994 }
static void cpu_hyp_reinit(void)
Definition: arm.c:1981

◆ cpu_hyp_init_context()

static void cpu_hyp_init_context ( void  )
static

Definition at line 1961 of file arm.c.

1962 {
1963  kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
1964 
1965  if (!is_kernel_in_hyp_mode())
1966  cpu_init_hyp_mode();
1967 }
static void cpu_init_hyp_mode(void)
Definition: arm.c:1910

◆ cpu_hyp_init_features()

static void cpu_hyp_init_features ( void  )
static

Definition at line 1969 of file arm.c.

1970 {
1971  cpu_set_hyp_vector();
1972  kvm_arm_init_debug();
1973 
1974  if (is_kernel_in_hyp_mode())
1975  kvm_timer_init_vhe();
1976 
1977  if (vgic_present)
1978  kvm_vgic_init_cpu_hardware();
1979 }
void kvm_timer_init_vhe(void)
Definition: arch_timer.c:1553
static void cpu_set_hyp_vector(void)
Definition: arm.c:1950
static bool vgic_present
Definition: arm.c:57
void kvm_arm_init_debug(void)
Definition: debug.c:78
void kvm_vgic_init_cpu_hardware(void)
Definition: vgic-init.c:544

◆ cpu_hyp_reinit()

static void cpu_hyp_reinit ( void  )
static

Definition at line 1981 of file arm.c.

1982 {
1983  cpu_hyp_reset();
1984  cpu_hyp_init_context();
1985  cpu_hyp_init_features();
1986 }
static void cpu_hyp_init_features(void)
Definition: arm.c:1969
static void cpu_hyp_init_context(void)
Definition: arm.c:1961
static void cpu_hyp_reset(void)
Definition: arm.c:1924

◆ cpu_hyp_reset()

static void cpu_hyp_reset ( void  )
static

Definition at line 1924 of file arm.c.

1925 {
1926  if (!is_kernel_in_hyp_mode())
1927  __hyp_reset_vectors();
1928 }

◆ cpu_hyp_uninit()

static void cpu_hyp_uninit ( void *  discard)
static

Definition at line 1996 of file arm.c.

1997 {
1998  if (__this_cpu_read(kvm_hyp_initialized)) {
1999  cpu_hyp_reset();
2000  __this_cpu_write(kvm_hyp_initialized, 0);
2001  }
2002 }

◆ cpu_init_hyp_mode()

static void cpu_init_hyp_mode ( void  )
static

Definition at line 1910 of file arm.c.

1911 {
1912  hyp_install_host_vector();
1913 
1914  /*
1915  * Disabling SSBD on a non-VHE system requires us to enable SSBS
1916  * at EL2.
1917  */
1918  if (this_cpu_has_cap(ARM64_SSBS) &&
1919  arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
1920  kvm_call_hyp_nvhe(__kvm_enable_ssbs);
1921  }
1922 }
static void hyp_install_host_vector(void)
Definition: arm.c:1890

◆ cpu_prepare_hyp_mode()

static void __init cpu_prepare_hyp_mode ( int  cpu,
u32  hyp_va_bits 
)
static

Definition at line 1842 of file arm.c.

1843 {
1844  struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
1845  u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
1846  unsigned long tcr;
1847 
1848  /*
1849  * Calculate the raw per-cpu offset without a translation from the
1850  * kernel's mapping to the linear mapping, and store it in tpidr_el2
1851  * so that we can use adr_l to access per-cpu variables in EL2.
1852  * Also drop the KASAN tag which gets in the way...
1853  */
1854  params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
1855  (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
1856 
1857  params->mair_el2 = read_sysreg(mair_el1);
1858 
1859  tcr = read_sysreg(tcr_el1);
1860  if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
1861  tcr |= TCR_EPD1_MASK;
1862  } else {
1863  tcr &= TCR_EL2_MASK;
1864  tcr |= TCR_EL2_RES1;
1865  }
1866  tcr &= ~TCR_T0SZ_MASK;
1867  tcr |= TCR_T0SZ(hyp_va_bits);
1868  tcr &= ~TCR_EL2_PS_MASK;
1869  tcr |= FIELD_PREP(TCR_EL2_PS_MASK, kvm_get_parange(mmfr0));
1870  if (kvm_lpa2_is_enabled())
1871  tcr |= TCR_EL2_DS;
1872  params->tcr_el2 = tcr;
1873 
1874  params->pgd_pa = kvm_mmu_get_httbr();
1875  if (is_protected_kvm_enabled())
1876  params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
1877  else
1878  params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
1879  if (cpus_have_final_cap(ARM64_KVM_HVHE))
1880  params->hcr_el2 |= HCR_E2H;
1881  params->vttbr = params->vtcr = 0;
1882 
1883  /*
1884  * Flush the init params from the data cache because the struct will
1885  * be read while the MMU is off.
1886  */
1887  kvm_flush_dcache_to_poc(params, sizeof(*params));
1888 }
phys_addr_t kvm_mmu_get_httbr(void)
Definition: mmu.c:1823
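
One note on the TCR value computed above: TCR_T0SZ(hyp_va_bits) uses the standard TCR_ELx encoding T0SZ = 64 - hyp_va_bits, so e.g. a 48-bit hypervisor VA space is programmed as T0SZ = 16.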

◆ cpu_set_hyp_vector()

static void cpu_set_hyp_vector ( void  )
static

Definition at line 1950 of file arm.c.

1951 {
1952  struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
1953  void *vector = hyp_spectre_vector_selector[data->slot];
1954 
1955  if (!is_protected_kvm_enabled())
1956  *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
1957  else
1958  kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
1959 }
static void * hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]
Definition: arm.c:1811

◆ DECLARE_KVM_HYP_PER_CPU()

DECLARE_KVM_HYP_PER_CPU ( unsigned long  ,
kvm_hyp_vector   
)

◆ DECLARE_KVM_NVHE_PER_CPU() [1/2]

DECLARE_KVM_NVHE_PER_CPU ( struct kvm_cpu_context  ,
kvm_hyp_ctxt   
)

◆ DECLARE_KVM_NVHE_PER_CPU() [2/2]

DECLARE_KVM_NVHE_PER_CPU ( struct kvm_nvhe_init_params  ,
kvm_init_params   
)

◆ DEFINE_PER_CPU() [1/2]

static DEFINE_PER_CPU ( unsigned char  ,
kvm_hyp_initialized   
)
static

◆ DEFINE_PER_CPU() [2/2]

DEFINE_PER_CPU ( unsigned long  ,
kvm_arm_hyp_stack_page   
)

◆ DEFINE_STATIC_KEY_FALSE()

DEFINE_STATIC_KEY_FALSE ( userspace_irqchip_in_use  )

◆ do_pkvm_init()

static int __init do_pkvm_init ( u32  hyp_va_bits)
static

Definition at line 2198 of file arm.c.

2199 {
2200  void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
2201  int ret;
2202 
2203  preempt_disable();
2204  cpu_hyp_init_context();
2205  ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
2206  num_possible_cpus(), kern_hyp_va(per_cpu_base),
2207  hyp_va_bits);
2208  cpu_hyp_init_features();
2209 
2210  /*
2211  * The stub hypercalls are now disabled, so set our local flag to
2212  * prevent a later re-init attempt in kvm_arch_hardware_enable().
2213  */
2214  __this_cpu_write(kvm_hyp_initialized, 1);
2215  preempt_enable();
2216 
2217  return ret;
2218 }
unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS]
Definition: hyp-smp.c:26
phys_addr_t hyp_mem_size
Definition: pkvm.c:24
phys_addr_t hyp_mem_base
Definition: pkvm.c:23
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus, unsigned long *per_cpu_base, u32 hyp_va_bits)
Definition: setup.c:314

◆ early_kvm_mode_cfg()

static int __init early_kvm_mode_cfg ( char *  arg)
static

Definition at line 2619 of file arm.c.

2620 {
2621  if (!arg)
2622  return -EINVAL;
2623 
2624  if (strcmp(arg, "none") == 0) {
2625  kvm_mode = KVM_MODE_NONE;
2626  return 0;
2627  }
2628 
2629  if (!is_hyp_mode_available()) {
2630  pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
2631  return 0;
2632  }
2633 
2634  if (strcmp(arg, "protected") == 0) {
2635  if (!is_kernel_in_hyp_mode())
2636  kvm_mode = KVM_MODE_PROTECTED;
2637  else
2638  pr_warn_once("Protected KVM not available with VHE\n");
2639 
2640  return 0;
2641  }
2642 
2643  if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
2644  kvm_mode = KVM_MODE_DEFAULT;
2645  return 0;
2646  }
2647 
2648  if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) {
2649  kvm_mode = KVM_MODE_NV;
2650  return 0;
2651  }
2652 
2653  return -EINVAL;
2654 }
static enum kvm_mode kvm_mode
Definition: arm.c:48
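
For example, booting with kvm-arm.mode=protected selects protected (pKVM) mode on a non-VHE system, kvm-arm.mode=nvhe forces the default nVHE behaviour, and kvm-arm.mode=none disables KVM entirely; any other value is rejected with -EINVAL.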

◆ early_param()

early_param ( "kvm-arm.mode"  ,
early_kvm_mode_cfg   
)

◆ get_hyp_id_aa64pfr0_el1()

static u64 get_hyp_id_aa64pfr0_el1 ( void  )
static

Definition at line 2220 of file arm.c.

2221 {
2222  /*
2223  * Track whether the system isn't affected by spectre/meltdown in the
2224  * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
2225  * Although this is per-CPU, we make it global for simplicity, e.g., not
2226  * to have to worry about vcpu migration.
2227  *
2228  * Unlike for non-protected VMs, userspace cannot override this for
2229  * protected VMs.
2230  */
2231  u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2232 
2233  val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
2234  ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
2235 
2236  val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
2237  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
2238  val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
2239  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
2240 
2241  return val;
2242 }

◆ hyp_cpu_pm_exit()

static void __init hyp_cpu_pm_exit ( void  )
inlinestatic

Definition at line 2085 of file arm.c.

2086 {
2087 }

◆ hyp_cpu_pm_init()

static void __init hyp_cpu_pm_init ( void  )
inlinestatic

Definition at line 2082 of file arm.c.

2083 {
2084 }

◆ hyp_install_host_vector()

static void hyp_install_host_vector ( void  )
static

Definition at line 1890 of file arm.c.

1891 {
1892  struct kvm_nvhe_init_params *params;
1893  struct arm_smccc_res res;
1894 
1895  /* Switch from the HYP stub to our own HYP init vector */
1896  __hyp_set_vectors(kvm_get_idmap_vector());
1897 
1898  /*
1899  * Call initialization code, and switch to the full blown HYP code.
1900  * If the cpucaps haven't been finalized yet, something has gone very
1901  * wrong, and hyp will crash and burn when it uses any
1902  * cpus_have_*_cap() wrapper.
1903  */
1904  BUG_ON(!system_capabilities_finalized());
1905  params = this_cpu_ptr_nvhe_sym(kvm_init_params);
1906  arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
1907  WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
1908 }
phys_addr_t kvm_get_idmap_vector(void)
Definition: mmu.c:1828

◆ init_cpu_logical_map()

static void __init init_cpu_logical_map ( void  )
static

Definition at line 2090 of file arm.c.

2091 {
2092  unsigned int cpu;
2093 
2094  /*
2095  * Copy the MPIDR <-> logical CPU ID mapping to hyp.
2096  * Only copy the set of online CPUs whose features have been checked
2097  * against the finalized system capabilities. The hypervisor will not
2098  * allow any other CPUs from the `possible` set to boot.
2099  */
2100  for_each_online_cpu(cpu)
2101  hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
2102 }
u64 cpu_logical_map(unsigned int cpu)
Definition: hyp-smp.c:19
u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS]
Definition: hyp-smp.c:17

◆ init_hyp_mode()

static int __init init_hyp_mode ( void  )
static

Definition at line 2298 of file arm.c.

2299 {
2300  u32 hyp_va_bits;
2301  int cpu;
2302  int err = -ENOMEM;
2303 
2304  /*
2305  * The protected Hyp-mode cannot be initialized if the memory pool
2306  * allocation has failed.
2307  */
2308  if (is_protected_kvm_enabled() && !hyp_mem_base)
2309  goto out_err;
2310 
2311  /*
2312  * Allocate Hyp PGD and setup Hyp identity mapping
2313  */
2314  err = kvm_mmu_init(&hyp_va_bits);
2315  if (err)
2316  goto out_err;
2317 
2318  /*
2319  * Allocate stack pages for Hypervisor-mode
2320  */
2321  for_each_possible_cpu(cpu) {
2322  unsigned long stack_page;
2323 
2324  stack_page = __get_free_page(GFP_KERNEL);
2325  if (!stack_page) {
2326  err = -ENOMEM;
2327  goto out_err;
2328  }
2329 
2330  per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
2331  }
2332 
2333  /*
2334  * Allocate and initialize pages for Hypervisor-mode percpu regions.
2335  */
2336  for_each_possible_cpu(cpu) {
2337  struct page *page;
2338  void *page_addr;
2339 
2340  page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
2341  if (!page) {
2342  err = -ENOMEM;
2343  goto out_err;
2344  }
2345 
2346  page_addr = page_address(page);
2347  memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
2348  kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
2349  }
2350 
2351  /*
2352  * Map the Hyp-code called directly from the host
2353  */
2354  err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
2355  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
2356  if (err) {
2357  kvm_err("Cannot map world-switch code\n");
2358  goto out_err;
2359  }
2360 
2361  err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
2362  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
2363  if (err) {
2364  kvm_err("Cannot map .hyp.rodata section\n");
2365  goto out_err;
2366  }
2367 
2368  err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
2369  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
2370  if (err) {
2371  kvm_err("Cannot map rodata section\n");
2372  goto out_err;
2373  }
2374 
2375  /*
2376  * .hyp.bss is guaranteed to be placed at the beginning of the .bss
2377  * section thanks to an assertion in the linker script. Map it RW and
2378  * the rest of .bss RO.
2379  */
2380  err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
2381  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
2382  if (err) {
2383  kvm_err("Cannot map hyp bss section: %d\n", err);
2384  goto out_err;
2385  }
2386 
2387  err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
2388  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
2389  if (err) {
2390  kvm_err("Cannot map bss section\n");
2391  goto out_err;
2392  }
2393 
2394  /*
2395  * Map the Hyp stack pages
2396  */
2397  for_each_possible_cpu(cpu) {
2398  struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
2399  char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
2400 
2401  err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
2402  if (err) {
2403  kvm_err("Cannot map hyp stack\n");
2404  goto out_err;
2405  }
2406 
2407  /*
2408  * Save the stack PA in nvhe_init_params. This will be needed
2409  * to recreate the stack mapping in protected nVHE mode.
2410  * __hyp_pa() won't do the right thing there, since the stack
2411  * has been mapped in the flexible private VA space.
2412  */
2413  params->stack_pa = __pa(stack_page);
2414  }
2415 
2416  for_each_possible_cpu(cpu) {
2417  char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
2418  char *percpu_end = percpu_begin + nvhe_percpu_size();
2419 
2420  /* Map Hyp percpu pages */
2421  err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
2422  if (err) {
2423  kvm_err("Cannot map hyp percpu region\n");
2424  goto out_err;
2425  }
2426 
2427  /* Prepare the CPU initialization parameters */
2428  cpu_prepare_hyp_mode(cpu, hyp_va_bits);
2429  }
2430 
2431  kvm_hyp_init_symbols();
2432 
2433  if (is_protected_kvm_enabled()) {
2434  if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) &&
2435  cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH))
2436  pkvm_hyp_init_ptrauth();
2437 
2438  init_cpu_logical_map();
2439 
2440  if (!init_psci_relay()) {
2441  err = -ENODEV;
2442  goto out_err;
2443  }
2444 
2445  err = kvm_hyp_init_protection(hyp_va_bits);
2446  if (err) {
2447  kvm_err("Failed to init hyp memory protection\n");
2448  goto out_err;
2449  }
2450  }
2451 
2452  return 0;
2453 
2454 out_err:
2455  teardown_hyp_mode();
2456  kvm_err("error initializing Hyp mode: %d\n", err);
2457  return err;
2458 }
static bool __init init_psci_relay(void)
Definition: arm.c:2107
static int __init kvm_hyp_init_protection(u32 hyp_va_bits)
Definition: arm.c:2259
static void kvm_hyp_init_symbols(void)
Definition: arm.c:2244
static unsigned long nvhe_percpu_order(void)
Definition: arm.c:1803
static void __init init_cpu_logical_map(void)
Definition: arm.c:2090
static unsigned long nvhe_percpu_size(void)
Definition: arm.c:1797
static void __init teardown_hyp_mode(void)
Definition: arm.c:2187
static void pkvm_hyp_init_ptrauth(void)
Definition: arm.c:2277
static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
Definition: arm.c:1842
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
Definition: mmu.c:574
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
Definition: mmu.c:691
int __init kvm_mmu_init(u32 *hyp_va_bits)
Definition: mmu.c:1858

◆ init_psci_relay()

static bool __init init_psci_relay ( void  )
static

Definition at line 2107 of file arm.c.

2108 {
2109  /*
2110  * If PSCI has not been initialized, protected KVM cannot install
2111  * itself on newly booted CPUs.
2112  */
2113  if (!psci_ops.get_version) {
2114  kvm_err("Cannot initialize protected mode without PSCI\n");
2115  return false;
2116  }
2117 
2118  kvm_host_psci_config.version = psci_ops.get_version();
2119  kvm_host_psci_config.smccc_version = arm_smccc_get_version();
2120 
2121  if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
2122  kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
2123  init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
2124  init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
2125  init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
2126  init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
2127  }
2128  return true;
2129 }
#define init_psci_0_1_impl_state(config, what)
Definition: arm.c:2104
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config
Definition: psci-relay.c:23

◆ init_subsystems()

static int __init init_subsystems ( void  )
static

Definition at line 2131 of file arm.c.

2132 {
2133  int err = 0;
2134 
2135  /*
2136  * Enable hardware so that subsystem initialisation can access EL2.
2137  */
2138  on_each_cpu(cpu_hyp_init, NULL, 1);
2139 
2140  /*
2141  * Register CPU lower-power notifier
2142  */
2143  hyp_cpu_pm_init();
2144 
2145  /*
2146  * Init HYP view of VGIC
2147  */
2148  err = kvm_vgic_hyp_init();
2149  switch (err) {
2150  case 0:
2151  vgic_present = true;
2152  break;
2153  case -ENODEV:
2154  case -ENXIO:
2155  vgic_present = false;
2156  err = 0;
2157  break;
2158  default:
2159  goto out;
2160  }
2161 
2162  /*
2163  * Init HYP architected timer support
2164  */
2165  err = kvm_timer_hyp_init(vgic_present);
2166  if (err)
2167  goto out;
2168 
2169  kvm_register_perf_callbacks(NULL);
2170 
2171 out:
2172  if (err)
2173  hyp_cpu_pm_exit();
2174 
2175  if (err || !is_protected_kvm_enabled())
2176  on_each_cpu(cpu_hyp_uninit, NULL, 1);
2177 
2178  return err;
2179 }
int __init kvm_timer_hyp_init(bool has_gic)
Definition: arch_timer.c:1367
static void cpu_hyp_init(void *discard)
Definition: arm.c:1988
static void cpu_hyp_uninit(void *discard)
Definition: arm.c:1996
static void __init hyp_cpu_pm_exit(void)
Definition: arm.c:2085
static void __init hyp_cpu_pm_init(void)
Definition: arm.c:2082
int kvm_vgic_hyp_init(void)
Definition: vgic-init.c:564

◆ is_kvm_arm_initialised()

bool is_kvm_arm_initialised ( void  )

Definition at line 62 of file arm.c.

63 {
64  return kvm_arm_initialised;
65 }
static bool kvm_arm_initialised
Definition: arm.c:57

◆ kvm_arch_alloc_vm()

struct kvm* kvm_arch_alloc_vm ( void  )

Definition at line 336 of file arm.c.

337 {
338  size_t sz = sizeof(struct kvm);
339 
340  if (!has_vhe())
341  return kzalloc(sz, GFP_KERNEL_ACCOUNT);
342 
343  return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
344 }

◆ kvm_arch_destroy_vm()

void kvm_arch_destroy_vm ( struct kvm *  kvm)

kvm_arch_destroy_vm - destroy the VM data structure
@kvm: pointer to the KVM struct

Definition at line 198 of file arm.c.

199 {
200  bitmap_free(kvm->arch.pmu_filter);
201  free_cpumask_var(kvm->arch.supported_cpus);
202 
203  kvm_vgic_destroy(kvm);
204 
205  if (is_protected_kvm_enabled())
206  pkvm_destroy_hyp_vm(kvm);
207 
208  kfree(kvm->arch.mpidr_data);
209  kvm_destroy_vcpus(kvm);
210 
211  kvm_unshare_hyp(kvm, kvm + 1);
212 
213  kvm_arm_teardown_hypercalls(kvm);
214 }
void kvm_arm_teardown_hypercalls(struct kvm *kvm)
Definition: hypercalls.c:403
void kvm_destroy_vcpus(struct kvm *kvm)
Definition: kvm_main.c:522
void kvm_unshare_hyp(void *from, void *to)
Definition: mmu.c:548
void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
Definition: pkvm.c:216
void kvm_vgic_destroy(struct kvm *kvm)
Definition: vgic-init.c:397

◆ kvm_arch_dev_ioctl()

long kvm_arch_dev_ioctl ( struct file *  filp,
unsigned int  ioctl,
unsigned long  arg 
)

Definition at line 330 of file arm.c.

332 {
333  return -EINVAL;
334 }

◆ kvm_arch_hardware_disable()

void kvm_arch_hardware_disable ( void  )

Definition at line 2024 of file arm.c.

2025 {
2026  kvm_timer_cpu_down();
2027  kvm_vgic_cpu_down();
2028 
2029  if (!is_protected_kvm_enabled())
2030  cpu_hyp_uninit(NULL);
2031 }
void kvm_timer_cpu_down(void)
Definition: arch_timer.c:1041
void kvm_vgic_cpu_down(void)
Definition: vgic-init.c:513

◆ kvm_arch_hardware_enable()

int kvm_arch_hardware_enable ( void  )

Definition at line 2004 of file arm.c.

2005 {
2006  /*
2007  * Most calls to this function are made with migration
2008  * disabled, but not with preemption disabled. The former is
2009  * enough to ensure correctness, but most of the helpers
2010  * expect the latter and will throw a tantrum otherwise.
2011  */
2012  preempt_disable();
2013 
2014  cpu_hyp_init(NULL);
2015 
2016  kvm_vgic_cpu_up();
2017  kvm_timer_cpu_up();
2018 
2019  preempt_enable();
2020 
2021  return 0;
2022 }
void kvm_timer_cpu_up(void)
Definition: arch_timer.c:1034
void kvm_vgic_cpu_up(void)
Definition: vgic-init.c:507

◆ kvm_arch_has_irq_bypass()

bool kvm_arch_has_irq_bypass ( void  )

Definition at line 2490 of file arm.c.

2491 {
2492  return true;
2493 }

◆ kvm_arch_init_vm()

int kvm_arch_init_vm ( struct kvm *  kvm,
unsigned long  type 
)

kvm_arch_init_vm - initializes a VM data structure
@kvm: pointer to the KVM struct

Definition at line 136 of file arm.c.

137 {
138  int ret;
139 
140  mutex_init(&kvm->arch.config_lock);
141 
142 #ifdef CONFIG_LOCKDEP
143  /* Clue in lockdep that the config_lock must be taken inside kvm->lock */
144  mutex_lock(&kvm->lock);
145  mutex_lock(&kvm->arch.config_lock);
146  mutex_unlock(&kvm->arch.config_lock);
147  mutex_unlock(&kvm->lock);
148 #endif
149 
150  ret = kvm_share_hyp(kvm, kvm + 1);
151  if (ret)
152  return ret;
153 
154  ret = pkvm_init_host_vm(kvm);
155  if (ret)
156  goto err_unshare_kvm;
157 
158  if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
159  ret = -ENOMEM;
160  goto err_unshare_kvm;
161  }
162  cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);
163 
164  ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
165  if (ret)
166  goto err_free_cpumask;
167 
168  kvm_vgic_early_init(kvm);
169 
170  kvm_timer_init_vm(kvm);
171 
172  /* The maximum number of VCPUs is limited by the host's GIC model */
173  kvm->max_vcpus = kvm_arm_default_max_vcpus();
174 
175  kvm_arm_init_hypercalls(kvm);
176 
177  bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
178 
179  return 0;
180 
181 err_free_cpumask:
182  free_cpumask_var(kvm->arch.supported_cpus);
183 err_unshare_kvm:
184  kvm_unshare_hyp(kvm, kvm + 1);
185  return ret;
186 }
void kvm_timer_init_vm(struct kvm *kvm)
Definition: arch_timer.c:1028
static int kvm_arm_default_max_vcpus(void)
Definition: arm.c:127
void kvm_arm_init_hypercalls(struct kvm *kvm)
Definition: hypercalls.c:392
int kvm_share_hyp(void *from, void *to)
Definition: mmu.c:516
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type)
Definition: mmu.c:868
int pkvm_init_host_vm(struct kvm *host_kvm)
Definition: pkvm.c:223
void kvm_vgic_early_init(struct kvm *kvm)
Definition: vgic-init.c:52

◆ kvm_arch_intc_initialized()

bool kvm_arch_intc_initialized ( struct kvm *  kvm)

Definition at line 714 of file arm.c.

715 {
716  return vgic_initialized(kvm);
717 }
#define vgic_initialized(k)
Definition: arm_vgic.h:393

◆ kvm_arch_irq_bypass_add_producer()

int kvm_arch_irq_bypass_add_producer ( struct irq_bypass_consumer *  cons,
struct irq_bypass_producer *  prod 
)

Definition at line 2495 of file arm.c.

2497 {
2498  struct kvm_kernel_irqfd *irqfd =
2499  container_of(cons, struct kvm_kernel_irqfd, consumer);
2500 
2501  return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
2502  &irqfd->irq_entry);
2503 }
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, struct kvm_kernel_irq_routing_entry *irq_entry)
Definition: vgic-v4.c:411

◆ kvm_arch_irq_bypass_del_producer()

void kvm_arch_irq_bypass_del_producer ( struct irq_bypass_consumer *  cons,
struct irq_bypass_producer *  prod 
)

Definition at line 2504 of file arm.c.

2506 {
2507  struct kvm_kernel_irqfd *irqfd =
2508  container_of(cons, struct kvm_kernel_irqfd, consumer);
2509 
2510  kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
2511  &irqfd->irq_entry);
2512 }
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq, struct kvm_kernel_irq_routing_entry *irq_entry)
Definition: vgic-v4.c:490

◆ kvm_arch_irq_bypass_start()

void kvm_arch_irq_bypass_start ( struct irq_bypass_consumer *  cons)

Definition at line 2522 of file arm.c.

2523 {
2524  struct kvm_kernel_irqfd *irqfd =
2525  container_of(cons, struct kvm_kernel_irqfd, consumer);
2526 
2527  kvm_arm_resume_guest(irqfd->kvm);
2528 }
void kvm_arm_resume_guest(struct kvm *kvm)
Definition: arm.c:729

◆ kvm_arch_irq_bypass_stop()

void kvm_arch_irq_bypass_stop ( struct irq_bypass_consumer *  cons)

Definition at line 2514 of file arm.c.

2515 {
2516  struct kvm_kernel_irqfd *irqfd =
2517  container_of(cons, struct kvm_kernel_irqfd, consumer);
2518 
2519  kvm_arm_halt_guest(irqfd->kvm);
2520 }
void kvm_arm_halt_guest(struct kvm *kvm)
Definition: arm.c:719

◆ kvm_arch_irqchip_in_kernel()

bool kvm_arch_irqchip_in_kernel ( struct kvm *  kvm)

Definition at line 2485 of file arm.c.

2486 {
2487  return irqchip_in_kernel(kvm);
2488 }
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392

◆ kvm_arch_sync_dirty_log()

void kvm_arch_sync_dirty_log ( struct kvm *  kvm,
struct kvm_memory_slot *  memslot 
)

Definition at line 1645 of file arm.c.

1646 {
1647 
1648 }

◆ kvm_arch_vcpu_blocking()

void kvm_arch_vcpu_blocking ( struct kvm_vcpu *  vcpu)

Definition at line 416 of file arm.c.

417 {
418 
419 }

◆ kvm_arch_vcpu_create()

int kvm_arch_vcpu_create ( struct kvm_vcpu *  vcpu)

Definition at line 357 of file arm.c.

358 {
359  int err;
360 
361  spin_lock_init(&vcpu->arch.mp_state_lock);
362 
363 #ifdef CONFIG_LOCKDEP
364  /* Inform lockdep that the config_lock is acquired after vcpu->mutex */
365  mutex_lock(&vcpu->mutex);
366  mutex_lock(&vcpu->kvm->arch.config_lock);
367  mutex_unlock(&vcpu->kvm->arch.config_lock);
368  mutex_unlock(&vcpu->mutex);
369 #endif
370 
371  /* Force users to call KVM_ARM_VCPU_INIT */
372  vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
373 
374  vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
375 
376  /*
377  * Default value for the FP state, will be overloaded at load
378  * time if we support FP (pretty likely)
379  */
380  vcpu->arch.fp_state = FP_STATE_FREE;
381 
382  /* Set up the timer */
383  kvm_timer_vcpu_init(vcpu);
384 
385  kvm_pmu_vcpu_init(vcpu);
386 
387  kvm_arm_reset_debug_ptr(vcpu);
388 
389  kvm_arm_pvtime_vcpu_init(&vcpu->arch);
390 
391  vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
392 
393  err = kvm_vgic_vcpu_init(vcpu);
394  if (err)
395  return err;
396 
397  return kvm_share_hyp(vcpu, vcpu + 1);
398 }
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:1011
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
Definition: debug.c:148
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:231
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
Definition: vgic-init.c:194

◆ kvm_arch_vcpu_destroy()

void kvm_arch_vcpu_destroy ( struct kvm_vcpu *  vcpu)

Definition at line 404 of file arm.c.

405 {
406  if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
407  static_branch_dec(&userspace_irqchip_in_use);
408 
409  kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
410  kvm_timer_vcpu_terminate(vcpu);
411  kvm_pmu_vcpu_destroy(vcpu);
412  kvm_vgic_vcpu_destroy(vcpu);
413  kvm_arm_vcpu_destroy(vcpu);
414 }
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:1445
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:259
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
Definition: reset.c:150
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
Definition: vgic-init.c:388

◆ kvm_arch_vcpu_fault()

vm_fault_t kvm_arch_vcpu_fault ( struct kvm_vcpu *  vcpu,
struct vm_fault *  vmf 
)

Definition at line 188 of file arm.c.

189 {
190  return VM_FAULT_SIGBUS;
191 }

◆ kvm_arch_vcpu_in_kernel()

bool kvm_arch_vcpu_in_kernel ( struct kvm_vcpu *  vcpu)

Definition at line 566 of file arm.c.

567 {
568  return vcpu_mode_priv(vcpu);
569 }

◆ kvm_arch_vcpu_ioctl()

long kvm_arch_vcpu_ioctl ( struct file *  filp,
unsigned int  ioctl,
unsigned long  arg 
)

Definition at line 1516 of file arm.c.

1518 {
1519  struct kvm_vcpu *vcpu = filp->private_data;
1520  void __user *argp = (void __user *)arg;
1521  struct kvm_device_attr attr;
1522  long r;
1523 
1524  switch (ioctl) {
1525  case KVM_ARM_VCPU_INIT: {
1526  struct kvm_vcpu_init init;
1527 
1528  r = -EFAULT;
1529  if (copy_from_user(&init, argp, sizeof(init)))
1530  break;
1531 
1532  r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
1533  break;
1534  }
1535  case KVM_SET_ONE_REG:
1536  case KVM_GET_ONE_REG: {
1537  struct kvm_one_reg reg;
1538 
1539  r = -ENOEXEC;
1540  if (unlikely(!kvm_vcpu_initialized(vcpu)))
1541  break;
1542 
1543  r = -EFAULT;
1544  if (copy_from_user(&reg, argp, sizeof(reg)))
1545  break;
1546 
1547  /*
1548  * We could owe a reset due to PSCI. Handle the pending reset
1549  * here to ensure userspace register accesses are ordered after
1550  * the reset.
1551  */
1552  if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1553  kvm_reset_vcpu(vcpu);
1554 
1555  if (ioctl == KVM_SET_ONE_REG)
1556  r = kvm_arm_set_reg(vcpu, &reg);
1557  else
1558  r = kvm_arm_get_reg(vcpu, &reg);
1559  break;
1560  }
1561  case KVM_GET_REG_LIST: {
1562  struct kvm_reg_list __user *user_list = argp;
1563  struct kvm_reg_list reg_list;
1564  unsigned n;
1565 
1566  r = -ENOEXEC;
1567  if (unlikely(!kvm_vcpu_initialized(vcpu)))
1568  break;
1569 
1570  r = -EPERM;
1571  if (!kvm_arm_vcpu_is_finalized(vcpu))
1572  break;
1573 
1574  r = -EFAULT;
1575  if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
1576  break;
1577  n = reg_list.n;
1578  reg_list.n = kvm_arm_num_regs(vcpu);
1579  if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
1580  break;
1581  r = -E2BIG;
1582  if (n < reg_list.n)
1583  break;
1584  r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
1585  break;
1586  }
1587  case KVM_SET_DEVICE_ATTR: {
1588  r = -EFAULT;
1589  if (copy_from_user(&attr, argp, sizeof(attr)))
1590  break;
1591  r = kvm_arm_vcpu_set_attr(vcpu, &attr);
1592  break;
1593  }
1594  case KVM_GET_DEVICE_ATTR: {
1595  r = -EFAULT;
1596  if (copy_from_user(&attr, argp, sizeof(attr)))
1597  break;
1598  r = kvm_arm_vcpu_get_attr(vcpu, &attr);
1599  break;
1600  }
1601  case KVM_HAS_DEVICE_ATTR: {
1602  r = -EFAULT;
1603  if (copy_from_user(&attr, argp, sizeof(attr)))
1604  break;
1605  r = kvm_arm_vcpu_has_attr(vcpu, &attr);
1606  break;
1607  }
1608  case KVM_GET_VCPU_EVENTS: {
1609  struct kvm_vcpu_events events;
1610 
1611  if (kvm_arm_vcpu_get_events(vcpu, &events))
1612  return -EINVAL;
1613 
1614  if (copy_to_user(argp, &events, sizeof(events)))
1615  return -EFAULT;
1616 
1617  return 0;
1618  }
1619  case KVM_SET_VCPU_EVENTS: {
1620  struct kvm_vcpu_events events;
1621 
1622  if (copy_from_user(&events, argp, sizeof(events)))
1623  return -EFAULT;
1624 
1625  return kvm_arm_vcpu_set_events(vcpu, &events);
1626  }
1627  case KVM_ARM_VCPU_FINALIZE: {
1628  int what;
1629 
1630  if (!kvm_vcpu_initialized(vcpu))
1631  return -ENOEXEC;
1632 
1633  if (get_user(what, (const int __user *)argp))
1634  return -EFAULT;
1635 
1636  return kvm_arm_vcpu_finalize(vcpu, what);
1637  }
1638  default:
1639  r = -EINVAL;
1640  }
1641 
1642  return r;
1643 }
static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: arm.c:1476
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
Definition: arm.c:578
static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
Definition: arm.c:1490
static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: arm.c:1462
static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
Definition: arm.c:1498
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
Definition: arm.c:1394
static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: arm.c:1448
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
Definition: guest.c:735
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
Definition: guest.c:717
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
Definition: guest.c:782
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
Definition: guest.c:762
static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
Definition: kvm_main.c:482
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
Definition: reset.c:142
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
Definition: reset.c:126
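
For context, the usual userspace sequence behind KVM_ARM_VCPU_INIT is to query the VM for its preferred target and then initialise each vCPU with it. A hypothetical minimal sketch (vm_fd and vcpu_fd are assumed to be valid KVM file descriptors):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int init_vcpu(int vm_fd, int vcpu_fd)
{
  struct kvm_vcpu_init init;

  /* Let KVM pick the target CPU type for this host. */
  if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
    return -1;

  /* Request no optional features. Until KVM_ARM_VCPU_INIT succeeds,
   * most vCPU ioctls above fail with -ENOEXEC, as checked by
   * kvm_vcpu_initialized(). */
  memset(init.features, 0, sizeof(init.features));
  return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}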

◆ kvm_arch_vcpu_ioctl_get_mpstate()

int kvm_arch_vcpu_ioctl_get_mpstate ( struct kvm_vcpu *  vcpu,
struct kvm_mp_state *  mp_state 
)

Definition at line 518 of file arm.c.

520 {
521  *mp_state = READ_ONCE(vcpu->arch.mp_state);
522 
523  return 0;
524 }

◆ kvm_arch_vcpu_ioctl_run()

int kvm_arch_vcpu_ioctl_run ( struct kvm_vcpu *  vcpu)

kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
@vcpu: The VCPU pointer

This function is called through the KVM_RUN ioctl from user space. It executes VM code in a loop until the time slice for the process is used up or some emulation is needed from user space, in which case the function returns 0 with the kvm_run structure filled in with the data required for the requested emulation.

Definition at line 965 of file arm.c.

966 {
967  struct kvm_run *run = vcpu->run;
968  int ret;
969 
970  if (run->exit_reason == KVM_EXIT_MMIO) {
971  ret = kvm_handle_mmio_return(vcpu);
972  if (ret)
973  return ret;
974  }
975 
976  vcpu_load(vcpu);
977 
978  if (run->immediate_exit) {
979  ret = -EINTR;
980  goto out;
981  }
982 
983  kvm_sigset_activate(vcpu);
984 
985  ret = 1;
986  run->exit_reason = KVM_EXIT_UNKNOWN;
987  run->flags = 0;
988  while (ret > 0) {
989  /*
990  * Check conditions before entering the guest
991  */
992  ret = xfer_to_guest_mode_handle_work(vcpu);
993  if (!ret)
994  ret = 1;
995 
996  if (ret > 0)
997  ret = check_vcpu_requests(vcpu);
998 
999  /*
1000  * Preparing the interrupts to be injected also
1001  * involves poking the GIC, which must be done in a
1002  * non-preemptible context.
1003  */
1004  preempt_disable();
1005 
1006  /*
1007  * The VMID allocator only tracks active VMIDs per
1008  * physical CPU, and therefore the VMID allocated may not be
1009  * preserved on VMID roll-over if the task was preempted,
1010  * making a thread's VMID inactive. So we need to call
1011  * kvm_arm_vmid_update() in non-premptible context.
1012  * kvm_arm_vmid_update() in non-preemptible context.
1013  if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
1014  has_vhe())
1015  __load_stage2(vcpu->arch.hw_mmu,
1016  vcpu->arch.hw_mmu->arch);
1017 
1018  kvm_pmu_flush_hwstate(vcpu);
1019 
1020  local_irq_disable();
1021 
1022  kvm_vgic_flush_hwstate(vcpu);
1023 
1024  kvm_pmu_update_vcpu_events(vcpu);
1025 
1026  /*
1027  * Ensure we set mode to IN_GUEST_MODE after we disable
1028  * interrupts and before the final VCPU requests check.
1029  * See the comment in kvm_vcpu_exiting_guest_mode() and
1030  * Documentation/virt/kvm/vcpu-requests.rst
1031  */
1032  smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1033 
1034  if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
1035  vcpu->mode = OUTSIDE_GUEST_MODE;
1036  isb(); /* Ensure work in x_flush_hwstate is committed */
1037  kvm_pmu_sync_hwstate(vcpu);
1038  if (static_branch_unlikely(&userspace_irqchip_in_use))
1039  kvm_timer_sync_user(vcpu);
1040  kvm_vgic_sync_hwstate(vcpu);
1041  local_irq_enable();
1042  preempt_enable();
1043  continue;
1044  }
1045 
1046  kvm_arm_setup_debug(vcpu);
1047  kvm_arch_vcpu_ctxflush_fp(vcpu);
1048 
1049  /**************************************************************
1050  * Enter the guest
1051  */
1052  trace_kvm_entry(*vcpu_pc(vcpu));
1053  guest_timing_enter_irqoff();
1054 
1055  ret = kvm_arm_vcpu_enter_exit(vcpu);
1056 
1057  vcpu->mode = OUTSIDE_GUEST_MODE;
1058  vcpu->stat.exits++;
1059  /*
1060  * Back from guest
1061  *************************************************************/
1062 
1063  kvm_arm_clear_debug(vcpu);
1064 
1065  /*
1066  * We must sync the PMU state before the vgic state so
1067  * that the vgic can properly sample the updated state of the
1068  * interrupt line.
1069  */
1070  kvm_pmu_sync_hwstate(vcpu);
1071 
1072  /*
1073  * Sync the vgic state before syncing the timer state because
1074  * the timer code needs to know if the virtual timer
1075  * interrupts are active.
1076  */
1077  kvm_vgic_sync_hwstate(vcpu);
1078 
1079  /*
1080  * Sync the timer hardware state before enabling interrupts as
1081  * we don't want vtimer interrupts to race with syncing the
1082  * timer virtual interrupt state.
1083  */
1084  if (static_branch_unlikely(&userspace_irqchip_in_use))
1085  kvm_timer_sync_user(vcpu);
1086 
1087  kvm_arch_vcpu_ctxsync_fp(vcpu);
1088 
1089  /*
1090  * We must ensure that any pending interrupts are taken before
1091  * we exit guest timing so that timer ticks are accounted as
1092  * guest time. Transiently unmask interrupts so that any
1093  * pending interrupts are taken.
1094  *
1095  * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
1096  * context synchronization event) is necessary to ensure that
1097  * pending interrupts are taken.
1098  */
1099  if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
1100  local_irq_enable();
1101  isb();
1102  local_irq_disable();
1103  }
1104 
1105  guest_timing_exit_irqoff();
1106 
1107  local_irq_enable();
1108 
1109  trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
1110 
1111  /* Exit types that need handling before we can be preempted */
1112  handle_exit_early(vcpu, ret);
1113 
1114  preempt_enable();
1115 
1116  /*
1117  * The ARMv8 architecture doesn't give the hypervisor
1118  * a mechanism to prevent a guest from dropping to AArch32 EL0
1119  * if implemented by the CPU. If we spot the guest in such
1120  * state and that we decided it wasn't supposed to do so (like
1121  * with the asymmetric AArch32 case), return to userspace with
1122  * a fatal error.
1123  */
1124  if (vcpu_mode_is_bad_32bit(vcpu)) {
1125  /*
1126  * As we have caught the guest red-handed, decide that
1127  * it isn't fit for purpose anymore by making the vcpu
1128  * invalid. The VMM can try and fix it by issuing a
1129  * KVM_ARM_VCPU_INIT if it really wants to.
1130  */
1131  vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
1132  ret = ARM_EXCEPTION_IL;
1133  }
1134 
1135  ret = handle_exit(vcpu, ret);
1136  }
1137 
1138  /* Tell userspace about in-kernel device output levels */
1139  if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
1140  kvm_timer_update_run(vcpu);
1141  kvm_pmu_update_run(vcpu);
1142  }
1143 
1144  kvm_sigset_deactivate(vcpu);
1145 
1146 out:
1147  /*
1148  * In the unlikely event that we are returning to userspace
1149  * with pending exceptions or PC adjustment, commit these
1150  * adjustments in order to give userspace a consistent view of
1151  * the vcpu state. Note that this relies on __kvm_adjust_pc()
1152  * being preempt-safe on VHE.
1153  */
1154  if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
1155  vcpu_get_flag(vcpu, INCREMENT_PC)))
1156  kvm_call_hyp(__kvm_adjust_pc, vcpu);
1157 
1158  vcpu_put(vcpu);
1159  return ret;
1160 }
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:430
void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:927
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
Definition: arm.c:838
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
Definition: arm.c:944
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
Definition: arm.c:905
static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
Definition: arm.c:880
static void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu)
Definition: arm_pmu.h:172
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
Definition: debug.c:280
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
Definition: debug.c:169
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
Definition: exception.c:365
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
Definition: fpsimd.c:139
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
Definition: fpsimd.c:126
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
Definition: handle_exit.c:366
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
Definition: handle_exit.c:322
void vcpu_put(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:219
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3692
void vcpu_load(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:208
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3678
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
Definition: mmio.c:81
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:405
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:388
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:417
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
Definition: vgic.c:905
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
Definition: vgic.c:875
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
Definition: vmid.c:138
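
Seen from userspace, this function backs the KVM_RUN ioctl: a VMM calls it in a loop and dispatches on run->exit_reason whenever it returns to userspace. A hypothetical minimal sketch (vcpu_fd is an assumed vCPU descriptor, run the mmap()ed struct kvm_run):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int vmm_run_loop(int vcpu_fd, struct kvm_run *run)
{
  for (;;) {
    if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
      return -1;  /* e.g. EINTR when run->immediate_exit is set */

    switch (run->exit_reason) {
    case KVM_EXIT_MMIO:
      /* Emulate the access described in run->mmio; the next
       * KVM_RUN completes it via kvm_handle_mmio_return(). */
      break;
    case KVM_EXIT_SYSTEM_EVENT:
      return 0;  /* guest asked for shutdown/reset */
    default:
      return 0;  /* let the VMM deal with anything else */
    }
  }
}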

◆ kvm_arch_vcpu_ioctl_set_mpstate()

int kvm_arch_vcpu_ioctl_set_mpstate ( struct kvm_vcpu *  vcpu,
struct kvm_mp_state *  mp_state 
)

Definition at line 526 of file arm.c.

528 {
529  int ret = 0;
530 
531  spin_lock(&vcpu->arch.mp_state_lock);
532 
533  switch (mp_state->mp_state) {
534  case KVM_MP_STATE_RUNNABLE:
535  WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
536  break;
537  case KVM_MP_STATE_STOPPED:
538  __kvm_arm_vcpu_power_off(vcpu);
539  break;
540  case KVM_MP_STATE_SUSPENDED:
541  kvm_arm_vcpu_suspend(vcpu);
542  break;
543  default:
544  ret = -EINVAL;
545  }
546 
547  spin_unlock(&vcpu->arch.mp_state_lock);
548 
549  return ret;
550 }
static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
Definition: arm.c:506
static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
Definition: arm.c:487
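
A hypothetical userspace counterpart, stopping a vCPU through KVM_SET_MP_STATE (vcpu_fd is an assumed vCPU descriptor):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int stop_vcpu(int vcpu_fd)
{
  struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

  /* Lands in the KVM_MP_STATE_STOPPED case above, which in turn
   * calls __kvm_arm_vcpu_power_off(). */
  return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}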

◆ kvm_arch_vcpu_ioctl_vcpu_init()

static int kvm_arch_vcpu_ioctl_vcpu_init ( struct kvm_vcpu *  vcpu,
struct kvm_vcpu_init init 
)
static

Definition at line 1394 of file arm.c.

1396 {
1397  bool power_off = false;
1398  int ret;
1399 
1400  /*
1401  * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid
1402  * reflecting it in the finalized feature set, thus limiting its scope
1403  * to a single KVM_ARM_VCPU_INIT call.
1404  */
1405  if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) {
1406  init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF);
1407  power_off = true;
1408  }
1409 
1410  ret = kvm_vcpu_set_target(vcpu, init);
1411  if (ret)
1412  return ret;
1413 
1414  /*
1415  * Ensure a rebooted VM will fault in RAM pages and detect if the
1416  * guest MMU is turned off and flush the caches as needed.
1417  *
1418  * S2FWB enforces all memory accesses to RAM being cacheable,
1419  * ensuring that the data side is always coherent. We still
1420  * need to invalidate the I-cache though, as FWB does *not*
1421  * imply CTR_EL0.DIC.
1422  */
1423  if (vcpu_has_run_once(vcpu)) {
1424  if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
1425  stage2_unmap_vm(vcpu->kvm);
1426  else
1427  icache_inval_all_pou();
1428  }
1429 
1430  vcpu_reset_hcr(vcpu);
1431  vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
1432 
1433  /*
1434  * Handle the "start in power-off" case.
1435  */
1436  spin_lock(&vcpu->arch.mp_state_lock);
1437 
1438  if (power_off)
1439  __kvm_arm_vcpu_power_off(vcpu);
1440  else
1441  WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
1442 
1443  spin_unlock(&vcpu->arch.mp_state_lock);
1444 
1445  return 0;
1446 }
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
Definition: arm.c:1371
void stage2_unmap_vm(struct kvm *kvm)
Definition: mmu.c:992
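
A hedged userspace sketch of the "start in power-off" path above. The helper name and descriptor are assumptions, and a real VMM would normally obtain the target/features pair from KVM_ARM_PREFERRED_TARGET first:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vcpu_init_powered_off(int vcpu_fd)
{
        struct kvm_vcpu_init init;

        memset(&init, 0, sizeof(init));
        init.target = KVM_ARM_TARGET_GENERIC_V8;
        /* Ephemeral: the handler clears this bit before finalizing features. */
        init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}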

◆ kvm_arch_vcpu_load()

void kvm_arch_vcpu_load ( struct kvm_vcpu *  vcpu,
int  cpu 
)

Definition at line 426 of file arm.c.

427 {
428  struct kvm_s2_mmu *mmu;
429  int *last_ran;
430 
431  mmu = vcpu->arch.hw_mmu;
432  last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
433 
434  /*
435  * We guarantee that both TLBs and I-cache are private to each
436  * vcpu. If detecting that a vcpu from the same VM has
437  * previously run on the same physical CPU, call into the
438  * hypervisor code to nuke the relevant contexts.
439  *
440  * We might get preempted before the vCPU actually runs, but
441  * over-invalidation doesn't affect correctness.
442  */
443  if (*last_ran != vcpu->vcpu_idx) {
444  kvm_call_hyp(__kvm_flush_cpu_context, mmu);
445  *last_ran = vcpu->vcpu_idx;
446  }
447 
448  vcpu->cpu = cpu;
449 
450  kvm_vgic_load(vcpu);
451  kvm_timer_vcpu_load(vcpu);
452  if (has_vhe())
453  kvm_vcpu_load_vhe(vcpu);
454  kvm_arch_vcpu_load_fp(vcpu);
455  kvm_vcpu_pmu_restore_guest(vcpu);
456  if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
457  kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
458 
459  if (single_task_running())
460  vcpu_clear_wfx_traps(vcpu);
461  else
462  vcpu_set_wfx_traps(vcpu);
463 
464  if (vcpu_has_ptrauth(vcpu))
465  vcpu_ptrauth_disable(vcpu);
466  kvm_arch_vcpu_load_debug_state_flags(vcpu);
467
468  if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
469  vcpu_set_on_unsupported_cpu(vcpu);
470 }
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:826
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
Definition: debug.c:317
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
Definition: fpsimd.c:75
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
Definition: tlb.c:182
void kvm_vgic_load(struct kvm_vcpu *vcpu)
Definition: vgic.c:938
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
Definition: switch.c:163

◆ kvm_arch_vcpu_postcreate()

void kvm_arch_vcpu_postcreate ( struct kvm_vcpu *  vcpu)

Definition at line 400 of file arm.c.

401 {
402 }

◆ kvm_arch_vcpu_precreate()

int kvm_arch_vcpu_precreate ( struct kvm *  kvm,
unsigned int  id 
)

Definition at line 346 of file arm.c.

347 {
348  if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
349  return -EBUSY;
350 
351  if (id >= kvm->max_vcpus)
352  return -EINVAL;
353 
354  return 0;
355 }

◆ kvm_arch_vcpu_put()

void kvm_arch_vcpu_put ( struct kvm_vcpu *  vcpu)

Definition at line 472 of file arm.c.

473 {
474  kvm_arch_vcpu_put_debug_state_flags(vcpu);
475  kvm_arch_vcpu_put_fp(vcpu);
476  if (has_vhe())
477  kvm_vcpu_put_vhe(vcpu);
478  kvm_timer_vcpu_put(vcpu);
479  kvm_vgic_put(vcpu);
480  kvm_vcpu_pmu_restore_host(vcpu);
481  kvm_arm_vmid_clear_active();
482
483  vcpu_clear_on_unsupported_cpu(vcpu);
484  vcpu->cpu = -1;
485 }
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:877
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
Definition: debug.c:340
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
Definition: fpsimd.c:175
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
Definition: pmu.c:197
void kvm_vgic_put(struct kvm_vcpu *vcpu)
Definition: vgic.c:949
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
Definition: switch.c:170
void kvm_arm_vmid_clear_active(void)
Definition: vmid.c:133

◆ kvm_arch_vcpu_run_pid_change()

int kvm_arch_vcpu_run_pid_change ( struct kvm_vcpu *  vcpu)

Definition at line 639 of file arm.c.

640 {
641  struct kvm *kvm = vcpu->kvm;
642  int ret;
643 
644  if (!kvm_vcpu_initialized(vcpu))
645  return -ENOEXEC;
646 
647  if (!kvm_arm_vcpu_is_finalized(vcpu))
648  return -EPERM;
649 
650  ret = kvm_arch_vcpu_run_map_fp(vcpu);
651  if (ret)
652  return ret;
653 
654  if (likely(vcpu_has_run_once(vcpu)))
655  return 0;
656 
657  kvm_init_mpidr_data(kvm);
658
659  kvm_arm_vcpu_init_debug(vcpu);
660
661  if (likely(irqchip_in_kernel(kvm))) {
662  /*
663  * Map the VGIC hardware resources before running a vcpu the
664  * first time on this VM.
665  */
666  ret = kvm_vgic_map_resources(kvm);
667  if (ret)
668  return ret;
669  }
670 
671  if (vcpu_has_nv(vcpu)) {
672  ret = kvm_init_nv_sysregs(vcpu->kvm);
673  if (ret)
674  return ret;
675  }
676 
677  ret = kvm_timer_enable(vcpu);
678  if (ret)
679  return ret;
680 
681  ret = kvm_arm_pmu_v3_enable(vcpu);
682  if (ret)
683  return ret;
684 
685  if (is_protected_kvm_enabled()) {
686  ret = pkvm_create_hyp_vm(kvm);
687  if (ret)
688  return ret;
689  }
690 
691  if (!irqchip_in_kernel(kvm)) {
692  /*
693  * Tell the rest of the code that there are userspace irqchip
694  * VMs in the wild.
695  */
696  static_branch_inc(&userspace_irqchip_in_use);
697  }
698 
699  /*
700  * Initialize traps for protected VMs.
701  * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
702  * the code is in place for first run initialization at EL2.
703  */
704  if (kvm_vm_is_protected(kvm))
705  kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
706 
707  mutex_lock(&kvm->arch.config_lock);
708  set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
709  mutex_unlock(&kvm->arch.config_lock);
710 
711  return ret;
712 }
int kvm_timer_enable(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:1506
static void kvm_init_mpidr_data(struct kvm *kvm)
Definition: arm.c:583
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
Definition: debug.c:137
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
Definition: fpsimd.c:39
int kvm_init_nv_sysregs(struct kvm *kvm)
Definition: nested.c:159
int pkvm_create_hyp_vm(struct kvm *host_kvm)
Definition: pkvm.c:204
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:816
void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
Definition: pkvm.c:205
int kvm_vgic_map_resources(struct kvm *kvm)
Definition: vgic-init.c:456

◆ kvm_arch_vcpu_runnable()

int kvm_arch_vcpu_runnable ( struct kvm_vcpu *  v)

kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled @v: The VCPU pointer

If the guest CPU is not waiting for interrupts or an interrupt line is asserted, the CPU is by definition runnable.

Definition at line 559 of file arm.c.

560 {
561  bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
562  return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
563  && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
564 }
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
Definition: arm.c:501
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
Definition: vgic.c:971

◆ kvm_arch_vcpu_should_kick()

int kvm_arch_vcpu_should_kick ( struct kvm_vcpu *  vcpu)

Definition at line 67 of file arm.c.

68 {
69  return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
70 }

◆ kvm_arch_vcpu_unblocking()

void kvm_arch_vcpu_unblocking ( struct kvm_vcpu *  vcpu)

Definition at line 421 of file arm.c.

422 {
423 
424 }

◆ kvm_arch_vm_ioctl()

int kvm_arch_vm_ioctl ( struct file *  filp,
unsigned int  ioctl,
unsigned long  arg 
)

Definition at line 1683 of file arm.c.

1684 {
1685  struct kvm *kvm = filp->private_data;
1686  void __user *argp = (void __user *)arg;
1687  struct kvm_device_attr attr;
1688 
1689  switch (ioctl) {
1690  case KVM_CREATE_IRQCHIP: {
1691  int ret;
1692  if (!vgic_present)
1693  return -ENXIO;
1694  mutex_lock(&kvm->lock);
1695  ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1696  mutex_unlock(&kvm->lock);
1697  return ret;
1698  }
1699  case KVM_ARM_SET_DEVICE_ADDR: {
1700  struct kvm_arm_device_addr dev_addr;
1701 
1702  if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
1703  return -EFAULT;
1704  return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
1705  }
1706  case KVM_ARM_PREFERRED_TARGET: {
1707  struct kvm_vcpu_init init = {
1708  .target = KVM_ARM_TARGET_GENERIC_V8,
1709  };
1710 
1711  if (copy_to_user(argp, &init, sizeof(init)))
1712  return -EFAULT;
1713 
1714  return 0;
1715  }
1716  case KVM_ARM_MTE_COPY_TAGS: {
1717  struct kvm_arm_copy_mte_tags copy_tags;
1718 
1719  if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
1720  return -EFAULT;
1721  return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
1722  }
1723  case KVM_ARM_SET_COUNTER_OFFSET: {
1724  struct kvm_arm_counter_offset offset;
1725 
1726  if (copy_from_user(&offset, argp, sizeof(offset)))
1727  return -EFAULT;
1728  return kvm_vm_ioctl_set_counter_offset(kvm, &offset);
1729  }
1730  case KVM_HAS_DEVICE_ATTR: {
1731  if (copy_from_user(&attr, argp, sizeof(attr)))
1732  return -EFAULT;
1733 
1734  return kvm_vm_has_attr(kvm, &attr);
1735  }
1736  case KVM_SET_DEVICE_ATTR: {
1737  if (copy_from_user(&attr, argp, sizeof(attr)))
1738  return -EFAULT;
1739 
1740  return kvm_vm_set_attr(kvm, &attr);
1741  }
1742  case KVM_ARM_GET_REG_WRITABLE_MASKS: {
1743  struct reg_mask_range range;
1744 
1745  if (copy_from_user(&range, argp, sizeof(range)))
1746  return -EFAULT;
1747  return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range);
1748  }
1749  default:
1750  return -EINVAL;
1751  }
1752 }
int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, struct kvm_arm_counter_offset *offset)
Definition: arch_timer.c:1651
static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
Definition: arm.c:1663
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
Definition: arm.c:1650
static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
Definition: arm.c:1673
int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, struct kvm_arm_copy_mte_tags *copy_tags)
Definition: guest.c:1014
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
Definition: sys_regs.c:3889
int kvm_vgic_create(struct kvm *kvm, u32 type)
Definition: vgic-init.c:71
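
For reference, a minimal sketch of the usual userspace pairing of KVM_ARM_PREFERRED_TARGET with KVM_ARM_VCPU_INIT (descriptor names assumed; error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Query the preferred target on the VM fd, then apply it to a vCPU. */
static int vcpu_init_preferred(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}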

◆ kvm_arm_default_max_vcpus()

static int kvm_arm_default_max_vcpus ( void  )
static

Definition at line 127 of file arm.c.

128 {
129  return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
130 }
static int kvm_vgic_get_max_vcpus(void)
Definition: arm_vgic.h:411

◆ kvm_arm_halt_guest()

void kvm_arm_halt_guest ( struct kvm *  kvm)

Definition at line 719 of file arm.c.

720 {
721  unsigned long i;
722  struct kvm_vcpu *vcpu;
723 
724  kvm_for_each_vcpu(i, vcpu, kvm)
725  vcpu->arch.pause = true;
726  kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
727 }
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
Definition: kvm_main.c:340

◆ kvm_arm_init()

static __init int kvm_arm_init ( void  )
static

Definition at line 2531 of file arm.c.

2532 {
2533  int err;
2534  bool in_hyp_mode;
2535 
2536  if (!is_hyp_mode_available()) {
2537  kvm_info("HYP mode not available\n");
2538  return -ENODEV;
2539  }
2540 
2541  if (kvm_get_mode() == KVM_MODE_NONE) {
2542  kvm_info("KVM disabled from command line\n");
2543  return -ENODEV;
2544  }
2545 
2546  err = kvm_sys_reg_table_init();
2547  if (err) {
2548  kvm_info("Error initializing system register tables");
2549  return err;
2550  }
2551 
2552  in_hyp_mode = is_kernel_in_hyp_mode();
2553 
2554  if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
2555  cpus_have_final_cap(ARM64_WORKAROUND_1508412))
2556  kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
2557  "Only trusted guests should be used on this system.\n");
2558 
2559  err = kvm_set_ipa_limit();
2560  if (err)
2561  return err;
2562 
2563  err = kvm_arm_init_sve();
2564  if (err)
2565  return err;
2566 
2567  err = kvm_arm_vmid_alloc_init();
2568  if (err) {
2569  kvm_err("Failed to initialize VMID allocator.\n");
2570  return err;
2571  }
2572 
2573  if (!in_hyp_mode) {
2574  err = init_hyp_mode();
2575  if (err)
2576  goto out_err;
2577  }
2578 
2579  err = kvm_init_vector_slots();
2580  if (err) {
2581  kvm_err("Cannot initialise vector slots\n");
2582  goto out_hyp;
2583  }
2584 
2585  err = init_subsystems();
2586  if (err)
2587  goto out_hyp;
2588 
2589  if (is_protected_kvm_enabled()) {
2590  kvm_info("Protected nVHE mode initialized successfully\n");
2591  } else if (in_hyp_mode) {
2592  kvm_info("VHE mode initialized successfully\n");
2593  } else {
2594  kvm_info("Hyp mode initialized successfully\n");
2595  }
2596 
2597  /*
2598  * FIXME: Do something reasonable if kvm_init() fails after pKVM
2599  * hypervisor protection is finalized.
2600  */
2601  err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2602  if (err)
2603  goto out_subs;
2604 
2605  kvm_arm_initialised = true;
2606 
2607  return 0;
2608 
2609 out_subs:
2610  teardown_subsystems();
2611 out_hyp:
2612  if (!in_hyp_mode)
2613  teardown_hyp_mode();
2614 out_err:
2615  kvm_arm_vmid_alloc_free();
2616  return err;
2617 }
static int __init init_subsystems(void)
Definition: arm.c:2131
static int __init init_hyp_mode(void)
Definition: arm.c:2298
static int kvm_init_vector_slots(void)
Definition: arm.c:1818
static void __init teardown_subsystems(void)
Definition: arm.c:2181
enum kvm_mode kvm_get_mode(void)
Definition: arm.c:2657
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
Definition: kvm_main.c:6402
int __init kvm_arm_init_sve(void)
Definition: reset.c:50
int __init kvm_set_ipa_limit(void)
Definition: reset.c:274
int __init kvm_sys_reg_table_init(void)
Definition: sys_regs.c:3933
void __init kvm_arm_vmid_alloc_free(void)
Definition: vmid.c:197
int __init kvm_arm_vmid_alloc_init(void)
Definition: vmid.c:180

◆ kvm_arm_resume_guest()

void kvm_arm_resume_guest ( struct kvm *  kvm)

Definition at line 729 of file arm.c.

730 {
731  unsigned long i;
732  struct kvm_vcpu *vcpu;
733 
734  kvm_for_each_vcpu(i, vcpu, kvm) {
735  vcpu->arch.pause = false;
736  __kvm_vcpu_wake_up(vcpu);
737  }
738 }

◆ kvm_arm_vcpu_enter_exit()

static int noinstr kvm_arm_vcpu_enter_exit ( struct kvm_vcpu *  vcpu)
static

Definition at line 944 of file arm.c.

945 {
946  int ret;
947 
948  guest_state_enter_irqoff();
949  ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
950  guest_state_exit_irqoff();
951 
952  return ret;
953 }
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
Definition: switch.c:248

◆ kvm_arm_vcpu_get_attr()

static int kvm_arm_vcpu_get_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)
static

Definition at line 1462 of file arm.c.

1464 {
1465  int ret = -ENXIO;
1466 
1467  switch (attr->group) {
1468  default:
1469  ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
1470  break;
1471  }
1472 
1473  return ret;
1474 }
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: guest.c:968

◆ kvm_arm_vcpu_get_events()

static int kvm_arm_vcpu_get_events ( struct kvm_vcpu *  vcpu,
struct kvm_vcpu_events *  events 
)
static

Definition at line 1490 of file arm.c.

1492 {
1493  memset(events, 0, sizeof(*events));
1494 
1495  return __kvm_arm_vcpu_get_events(vcpu, events);
1496 }
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
Definition: guest.c:814

◆ kvm_arm_vcpu_has_attr()

static int kvm_arm_vcpu_has_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)
static

Definition at line 1476 of file arm.c.

1478 {
1479  int ret = -ENXIO;
1480 
1481  switch (attr->group) {
1482  default:
1483  ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
1484  break;
1485  }
1486 
1487  return ret;
1488 }
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: guest.c:991

◆ kvm_arm_vcpu_power_off()

void kvm_arm_vcpu_power_off ( struct kvm_vcpu *  vcpu)

Definition at line 494 of file arm.c.

495 {
496  spin_lock(&vcpu->arch.mp_state_lock);
497  __kvm_arm_vcpu_power_off(vcpu);
498  spin_unlock(&vcpu->arch.mp_state_lock);
499 }

◆ kvm_arm_vcpu_set_attr()

static int kvm_arm_vcpu_set_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)
static

Definition at line 1448 of file arm.c.

1450 {
1451  int ret = -ENXIO;
1452 
1453  switch (attr->group) {
1454  default:
1455  ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
1456  break;
1457  }
1458 
1459  return ret;
1460 }
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Definition: guest.c:943

◆ kvm_arm_vcpu_set_events()

static int kvm_arm_vcpu_set_events ( struct kvm_vcpu *  vcpu,
struct kvm_vcpu_events *  events 
)
static

Definition at line 1498 of file arm.c.

1500 {
1501  int i;
1502 
1503  /* check whether the reserved field is zero */
1504  for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
1505  if (events->reserved[i])
1506  return -EINVAL;
1507 
1508  /* check whether the pad field is zero */
1509  for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
1510  if (events->exception.pad[i])
1511  return -EINVAL;
1512 
1513  return __kvm_arm_vcpu_set_events(vcpu, events);
1514 }
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events)
Definition: guest.c:832

◆ kvm_arm_vcpu_stopped()

bool kvm_arm_vcpu_stopped ( struct kvm_vcpu *  vcpu)

Definition at line 501 of file arm.c.

502 {
503  return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
504 }

◆ kvm_arm_vcpu_suspend()

static void kvm_arm_vcpu_suspend ( struct kvm_vcpu *  vcpu)
static

Definition at line 506 of file arm.c.

507 {
508  WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
509  kvm_make_request(KVM_REQ_SUSPEND, vcpu);
510  kvm_vcpu_kick(vcpu);
511 }

◆ kvm_arm_vcpu_suspended()

static bool kvm_arm_vcpu_suspended ( struct kvm_vcpu *  vcpu)
static

Definition at line 513 of file arm.c.

514 {
515  return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
516 }

◆ kvm_get_mode()

enum kvm_mode kvm_get_mode ( void  )

Definition at line 2657 of file arm.c.

2658 {
2659  return kvm_mode;
2660 }

◆ kvm_hyp_init_protection()

static int __init kvm_hyp_init_protection ( u32  hyp_va_bits)
static

Definition at line 2259 of file arm.c.

2260 {
2261  void *addr = phys_to_virt(hyp_mem_base);
2262  int ret;
2263 
2264  ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
2265  if (ret)
2266  return ret;
2267 
2268  ret = do_pkvm_init(hyp_va_bits);
2269  if (ret)
2270  return ret;
2271 
2272  free_hyp_pgds();
2273 
2274  return 0;
2275 }
static int __init do_pkvm_init(u32 hyp_va_bits)
Definition: arm.c:2198
void __init free_hyp_pgds(void)
Definition: mmu.c:372

◆ kvm_hyp_init_symbols()

static void kvm_hyp_init_symbols ( void  )
static

Definition at line 2244 of file arm.c.

2245 {
2246  kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
2247  kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2248  kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
2249  kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
2250  kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
2251  kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
2252  kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
2253  kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2254  kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
2255  kvm_nvhe_sym(__icache_flags) = __icache_flags;
2256  kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
2257 }
static u64 get_hyp_id_aa64pfr0_el1(void)
Definition: arm.c:2220
unsigned int kvm_arm_vmid_bits
Definition: pkvm.c:19
unsigned long __icache_flags
Definition: pkvm.c:16
u64 id_aa64isar2_el1_sys_val
Definition: sys_regs.c:25
u64 id_aa64pfr0_el1_sys_val
Definition: sys_regs.c:21
u64 id_aa64pfr1_el1_sys_val
Definition: sys_regs.c:22
u64 id_aa64mmfr0_el1_sys_val
Definition: sys_regs.c:26
u64 id_aa64mmfr2_el1_sys_val
Definition: sys_regs.c:28
u64 id_aa64mmfr1_el1_sys_val
Definition: sys_regs.c:27
u64 id_aa64isar1_el1_sys_val
Definition: sys_regs.c:24
u64 id_aa64isar0_el1_sys_val
Definition: sys_regs.c:23
u64 id_aa64smfr0_el1_sys_val
Definition: sys_regs.c:29

◆ kvm_init_mpidr_data()

static void kvm_init_mpidr_data ( struct kvm *  kvm)
static

Definition at line 583 of file arm.c.

584 {
585  struct kvm_mpidr_data *data = NULL;
586  unsigned long c, mask, nr_entries;
587  u64 aff_set = 0, aff_clr = ~0UL;
588  struct kvm_vcpu *vcpu;
589 
590  mutex_lock(&kvm->arch.config_lock);
591 
592  if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
593  goto out;
594 
595  kvm_for_each_vcpu(c, vcpu, kvm) {
596  u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
597  aff_set |= aff;
598  aff_clr &= aff;
599  }
600 
601  /*
602  * A significant bit can be either 0 or 1, and will only appear in
603  * aff_set. Use aff_clr to weed out the useless stuff.
604  */
605  mask = aff_set ^ aff_clr;
606  nr_entries = BIT_ULL(hweight_long(mask));
607 
608  /*
609  * Don't let userspace fool us. If we need more than a single page
610  * to describe the compressed MPIDR array, just fall back to the
611  * iterative method. Single vcpu VMs do not need this either.
612  */
613  if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
614  data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
615  GFP_KERNEL_ACCOUNT);
616 
617  if (!data)
618  goto out;
619 
620  data->mpidr_mask = mask;
621 
622  kvm_for_each_vcpu(c, vcpu, kvm) {
623  u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
624  u16 index = kvm_mpidr_index(data, aff);
625 
626  data->cmpidr_to_idx[index] = c;
627  }
628 
629  kvm->arch.mpidr_data = data;
630 out:
631  mutex_unlock(&kvm->arch.config_lock);
632 }
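
To make the mask computation concrete, a standalone sketch in plain C (outside the kernel) with four hypothetical affinity values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Two clusters (Aff1 = 0/1) with two cores each (Aff0 = 0/1). */
        uint64_t aff[] = { 0x000, 0x001, 0x100, 0x101 };
        uint64_t aff_set = 0, aff_clr = ~0ULL;

        for (int i = 0; i < 4; i++) {
                aff_set |= aff[i];
                aff_clr &= aff[i];
        }

        /* mask = 0x101: two significant bits, so 2^2 = 4 table entries. */
        printf("mask = 0x%llx\n",
               (unsigned long long)(aff_set ^ aff_clr));
        return 0;
}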

◆ kvm_init_vector_slot()

static void kvm_init_vector_slot ( void *  base,
enum arm64_hyp_spectre_vector  slot 
)
static

Definition at line 1813 of file arm.c.

1814 {
1815  hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
1816 }
static unsigned long base
Definition: early_alloc.c:15

◆ kvm_init_vector_slots()

static int kvm_init_vector_slots ( void  )
static

Definition at line 1818 of file arm.c.

1819 {
1820  int err;
1821  void *base;
1822 
1823  base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
1824  kvm_init_vector_slot(base, HYP_VECTOR_DIRECT);
1825 
1826  base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
1827  kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT);
1828 
1829  if (kvm_system_needs_idmapped_vectors() &&
1830  !is_protected_kvm_enabled()) {
1831  err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs),
1832  __BP_HARDEN_HYP_VECS_SZ, &base);
1833  if (err)
1834  return err;
1835  }
1836 
1837  kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT);
1838  kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT);
1839  return 0;
1840 }
static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
Definition: arm.c:1813
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, void **haddr)
Definition: mmu.c:778

◆ kvm_mpidr_to_vcpu()

struct kvm_vcpu* kvm_mpidr_to_vcpu ( struct kvm *  kvm,
unsigned long  mpidr 
)

Definition at line 2460 of file arm.c.

2461 {
2462  struct kvm_vcpu *vcpu;
2463  unsigned long i;
2464 
2465  mpidr &= MPIDR_HWID_BITMASK;
2466 
2467  if (kvm->arch.mpidr_data) {
2468  u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
2469 
2470  vcpu = kvm_get_vcpu(kvm,
2471  kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
2472  if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
2473  vcpu = NULL;
2474 
2475  return vcpu;
2476  }
2477 
2478  kvm_for_each_vcpu(i, vcpu, kvm) {
2479  if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
2480  return vcpu;
2481  }
2482  return NULL;
2483 }

◆ kvm_setup_vcpu()

static int kvm_setup_vcpu ( struct kvm_vcpu *  vcpu)
static

Definition at line 1326 of file arm.c.

1327 {
1328  struct kvm *kvm = vcpu->kvm;
1329  int ret = 0;
1330 
1331  /*
1332  * When the vCPU has a PMU, but no PMU is set for the guest
1333  * yet, set the default one.
1334  */
1335  if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
1336  ret = kvm_arm_set_default_pmu(kvm);
1337 
1338  return ret;
1339 }
#define kvm_vcpu_has_pmu(vcpu)
Definition: arm_pmu.h:170
int kvm_arm_set_default_pmu(struct kvm *kvm)
Definition: pmu-emul.c:939

◆ kvm_vcpu_exit_request()

static bool kvm_vcpu_exit_request ( struct kvm_vcpu *  vcpu,
int *  ret 
)
static

kvm_vcpu_exit_request - returns true if the VCPU should not enter the guest @vcpu: The VCPU pointer @ret: Pointer to write optional return code

Returns: true if the VCPU needs to return to a preemptible + interruptible kernel context and skip guest entry.

This function disambiguates between two different types of exits: exits to a preemptible + interruptible kernel context and exits to userspace. For an exit to userspace, this function will write the return code to ret and return true. For an exit to a preemptible + interruptible kernel context (i.e. check for pending work and re-enter), return true without writing to ret.

Definition at line 905 of file arm.c.

906 {
907  struct kvm_run *run = vcpu->run;
908 
909  /*
910  * If we're using a userspace irqchip, then check if we need
911  * to tell a userspace irqchip about timer or PMU level
912  * changes and if so, exit to userspace (the actual level
913  * state gets updated in kvm_timer_update_run and
914  * kvm_pmu_update_run below).
915  */
916  if (static_branch_unlikely(&userspace_irqchip_in_use)) {
917  if (kvm_timer_should_notify_user(vcpu) ||
918  kvm_pmu_should_notify_user(vcpu)) {
919  *ret = -EINTR;
920  run->exit_reason = KVM_EXIT_INTR;
921  return true;
922  }
923  }
924 
925  if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
926  run->exit_reason = KVM_EXIT_FAIL_ENTRY;
927  run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
928  run->fail_entry.cpu = smp_processor_id();
929  *ret = 0;
930  return true;
931  }
932 
933  return kvm_request_pending(vcpu) ||
934  xfer_to_guest_mode_work_pending();
935 }
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:860
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:373
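
From userspace, the -EINTR path above appears as KVM_RUN failing with errno EINTR while run->exit_reason reads KVM_EXIT_INTR. A hedged sketch (descriptor and mapped run structure assumed to exist):

#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns 1 to re-enter, 0 on a normal exit, -1 on error. */
static int run_once(int vcpu_fd, struct kvm_run *run)
{
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                if (errno == EINTR && run->exit_reason == KVM_EXIT_INTR)
                        return 1;  /* sync timer/PMU levels, then re-enter */
                return -1;
        }
        return 0;
}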

◆ kvm_vcpu_init_changed()

static bool kvm_vcpu_init_changed ( struct kvm_vcpu *  vcpu,
const struct kvm_vcpu_init init 
)
static

Definition at line 1317 of file arm.c.

1319 {
1320  unsigned long features = init->features[0];
1321 
1322  return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features,
1323  KVM_VCPU_MAX_FEATURES);
1324 }

◆ kvm_vcpu_init_check_features()

static int kvm_vcpu_init_check_features ( struct kvm_vcpu *  vcpu,
const struct kvm_vcpu_init init 
)
static

Definition at line 1273 of file arm.c.

1275 {
1276  unsigned long features = init->features[0];
1277  int i;
1278 
1279  if (features & ~KVM_VCPU_VALID_FEATURES)
1280  return -ENOENT;
1281 
1282  for (i = 1; i < ARRAY_SIZE(init->features); i++) {
1283  if (init->features[i])
1284  return -ENOENT;
1285  }
1286 
1287  if (features & ~system_supported_vcpu_features())
1288  return -EINVAL;
1289 
1290  /*
1291  * For now make sure that both address/generic pointer authentication
1292  * features are requested by the userspace together.
1293  */
1294  if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
1295  test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
1296  return -EINVAL;
1297 
1298  /* Disallow NV+SVE for the time being */
1299  if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
1300  test_bit(KVM_ARM_VCPU_SVE, &features))
1301  return -EINVAL;
1302 
1303  if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
1304  return 0;
1305 
1306  /* MTE is incompatible with AArch32 */
1307  if (kvm_has_mte(vcpu->kvm))
1308  return -EINVAL;
1309 
1310  /* NV is incompatible with AArch32 */
1311  if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
1312  return -EINVAL;
1313 
1314  return 0;
1315 }
static unsigned long system_supported_vcpu_features(void)
Definition: arm.c:1249

◆ kvm_vcpu_initialized()

static int kvm_vcpu_initialized ( struct kvm_vcpu *  vcpu)
static

Definition at line 578 of file arm.c.

579 {
580  return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
581 }

◆ kvm_vcpu_set_target()

static int kvm_vcpu_set_target ( struct kvm_vcpu *  vcpu,
const struct kvm_vcpu_init init 
)
static

Definition at line 1371 of file arm.c.

1373 {
1374  int ret;
1375 
1376  if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
1377  init->target != kvm_target_cpu())
1378  return -EINVAL;
1379 
1380  ret = kvm_vcpu_init_check_features(vcpu, init);
1381  if (ret)
1382  return ret;
1383 
1384  if (!kvm_vcpu_initialized(vcpu))
1385  return __kvm_vcpu_set_target(vcpu, init);
1386 
1387  if (kvm_vcpu_init_changed(vcpu, init))
1388  return -EINVAL;
1389 
1390  kvm_reset_vcpu(vcpu);
1391  return 0;
1392 }
static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
Definition: arm.c:1341
static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init)
Definition: arm.c:1273
u32 __attribute_const__ kvm_target_cpu(void)
Definition: guest.c:857

◆ kvm_vcpu_sleep()

static void kvm_vcpu_sleep ( struct kvm_vcpu *  vcpu)
static

Definition at line 740 of file arm.c.

741 {
742  struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
743 
744  rcuwait_wait_event(wait,
745  (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
746  TASK_INTERRUPTIBLE);
747 
748  if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
749  /* Awaken to handle a signal, request we sleep again later. */
750  kvm_make_request(KVM_REQ_SLEEP, vcpu);
751  }
752 
753  /*
754  * Make sure we will observe a potential reset request if we've
755  * observed a change to the power state. Pairs with the smp_wmb() in
756  * kvm_psci_vcpu_on().
757  */
758  smp_rmb();
759 }

◆ kvm_vcpu_suspend()

static int kvm_vcpu_suspend ( struct kvm_vcpu *  vcpu)
static

Definition at line 796 of file arm.c.

797 {
798  if (!kvm_arm_vcpu_suspended(vcpu))
799  return 1;
800 
801  kvm_vcpu_wfi(vcpu);
802 
803  /*
804  * The suspend state is sticky; we do not leave it until userspace
805  * explicitly marks the vCPU as runnable. Request that we suspend again
806  * later.
807  */
808  kvm_make_request(KVM_REQ_SUSPEND, vcpu);
809 
810  /*
811  * Check to make sure the vCPU is actually runnable. If so, exit to
812  * userspace informing it of the wakeup condition.
813  */
814  if (kvm_arch_vcpu_runnable(vcpu)) {
815  memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
816  vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
817  vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
818  return 0;
819  }
820 
821  /*
822  * Otherwise, we were unblocked to process a different event, such as a
823  * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
824  * process the event.
825  */
826  return 1;
827 }
static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
Definition: arm.c:513
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
Definition: arm.c:769
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
Definition: arm.c:559
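
The wakeup exit above reaches userspace as KVM_EXIT_SYSTEM_EVENT; a hedged sketch of the check a VMM might perform after KVM_RUN returns (helper name assumed; run mapping assumed to exist):

#include <linux/kvm.h>
#include <stdbool.h>

/* True if the vCPU left the suspended state because a wake event arrived. */
static bool is_wakeup_exit(const struct kvm_run *run)
{
        return run->exit_reason == KVM_EXIT_SYSTEM_EVENT &&
               run->system_event.type == KVM_SYSTEM_EVENT_WAKEUP;
}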

◆ kvm_vcpu_wfi()

void kvm_vcpu_wfi ( struct kvm_vcpu *  vcpu)

kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior @vcpu: The VCPU pointer

Suspend execution of a vCPU until a valid wake event is detected, i.e. until the vCPU is runnable. The vCPU may or may not be scheduled out, depending on when a wake event arrives, e.g. there may already be a pending wake event.

Definition at line 769 of file arm.c.

770 {
771  /*
772  * Sync back the state of the GIC CPU interface so that we have
773  * the latest PMR and group enables. This ensures that
774  * kvm_arch_vcpu_runnable has up-to-date data to decide whether
775  * we have pending interrupts, e.g. when determining if the
776  * vCPU should block.
777  *
778  * For the same reason, we want to tell GICv4 that we need
779  * doorbells to be signalled, should an interrupt become pending.
780  */
781  preempt_disable();
782  kvm_vgic_vmcr_sync(vcpu);
783  vcpu_set_flag(vcpu, IN_WFI);
784  vgic_v4_put(vcpu);
785  preempt_enable();
786 
787  kvm_vcpu_halt(vcpu);
788  vcpu_clear_flag(vcpu, IN_WFIT);
789 
790  preempt_disable();
791  vcpu_clear_flag(vcpu, IN_WFI);
792  vgic_v4_load(vcpu);
793  preempt_enable();
794 }
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3842
void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
Definition: vgic.c:960

◆ kvm_vm_has_attr()

static int kvm_vm_has_attr ( struct kvm *  kvm,
struct kvm_device_attr *  attr 
)
static

Definition at line 1663 of file arm.c.

1664 {
1665  switch (attr->group) {
1666  case KVM_ARM_VM_SMCCC_CTRL:
1667  return kvm_vm_smccc_has_attr(kvm, attr);
1668  default:
1669  return -ENXIO;
1670  }
1671 }
int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
Definition: hypercalls.c:642

◆ kvm_vm_ioctl_check_extension()

int kvm_vm_ioctl_check_extension ( struct kvm *  kvm,
long  ext 
)

Definition at line 216 of file arm.c.

217 {
218  int r;
219  switch (ext) {
220  case KVM_CAP_IRQCHIP:
221  r = vgic_present;
222  break;
223  case KVM_CAP_IOEVENTFD:
224  case KVM_CAP_USER_MEMORY:
225  case KVM_CAP_SYNC_MMU:
226  case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
227  case KVM_CAP_ONE_REG:
228  case KVM_CAP_ARM_PSCI:
229  case KVM_CAP_ARM_PSCI_0_2:
230  case KVM_CAP_READONLY_MEM:
231  case KVM_CAP_MP_STATE:
232  case KVM_CAP_IMMEDIATE_EXIT:
233  case KVM_CAP_VCPU_EVENTS:
234  case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
235  case KVM_CAP_ARM_NISV_TO_USER:
236  case KVM_CAP_ARM_INJECT_EXT_DABT:
237  case KVM_CAP_SET_GUEST_DEBUG:
238  case KVM_CAP_VCPU_ATTRIBUTES:
239  case KVM_CAP_PTP_KVM:
240  case KVM_CAP_ARM_SYSTEM_SUSPEND:
241  case KVM_CAP_IRQFD_RESAMPLE:
242  case KVM_CAP_COUNTER_OFFSET:
243  r = 1;
244  break;
245  case KVM_CAP_SET_GUEST_DEBUG2:
246  return KVM_GUESTDBG_VALID_MASK;
247  case KVM_CAP_ARM_SET_DEVICE_ADDR:
248  r = 1;
249  break;
250  case KVM_CAP_NR_VCPUS:
251  /*
252  * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
253  * architectures, as it does not always bound it to
254  * KVM_CAP_MAX_VCPUS. It should not matter much because
255  * this is just an advisory value.
256  */
257  r = min_t(unsigned int, num_online_cpus(),
258  kvm_arm_default_max_vcpus());
259  break;
260  case KVM_CAP_MAX_VCPUS:
261  case KVM_CAP_MAX_VCPU_ID:
262  if (kvm)
263  r = kvm->max_vcpus;
264  else
265  r = kvm_arm_default_max_vcpus();
266  break;
267  case KVM_CAP_MSI_DEVID:
268  if (!kvm)
269  r = -EINVAL;
270  else
271  r = kvm->arch.vgic.msis_require_devid;
272  break;
273  case KVM_CAP_ARM_USER_IRQ:
274  /*
275  * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
276  * (bump this number if adding more devices)
277  */
278  r = 1;
279  break;
280  case KVM_CAP_ARM_MTE:
281  r = system_supports_mte();
282  break;
283  case KVM_CAP_STEAL_TIME:
284  r = kvm_arm_pvtime_supported();
285  break;
286  case KVM_CAP_ARM_EL1_32BIT:
287  r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
288  break;
289  case KVM_CAP_GUEST_DEBUG_HW_BPS:
290  r = get_num_brps();
291  break;
292  case KVM_CAP_GUEST_DEBUG_HW_WPS:
293  r = get_num_wrps();
294  break;
295  case KVM_CAP_ARM_PMU_V3:
296  r = kvm_arm_support_pmu_v3();
297  break;
298  case KVM_CAP_ARM_INJECT_SERROR_ESR:
299  r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
300  break;
301  case KVM_CAP_ARM_VM_IPA_SIZE:
302  r = get_kvm_ipa_limit();
303  break;
304  case KVM_CAP_ARM_SVE:
305  r = system_supports_sve();
306  break;
307  case KVM_CAP_ARM_PTRAUTH_ADDRESS:
308  case KVM_CAP_ARM_PTRAUTH_GENERIC:
309  r = system_has_full_ptr_auth();
310  break;
311  case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
312  if (kvm)
313  r = kvm->arch.mmu.split_page_chunk_size;
314  else
315  r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
316  break;
317  case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
318  r = kvm_supported_block_sizes();
319  break;
320  case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
321  r = BIT(0);
322  break;
323  default:
324  r = 0;
325  }
326 
327  return r;
328 }
static bool kvm_arm_support_pmu_v3(void)
Definition: arm_pmu.h:113
bool kvm_arm_pvtime_supported(void)
Definition: pvtime.c:70
u32 get_kvm_ipa_limit(void)
Definition: reset.c:269
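
Userspace reads these values through the KVM_CHECK_EXTENSION ioctl; a minimal sketch, assuming an already-open /dev/kvm or VM descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns the supported guest IPA bits (0 if the capability is absent). */
static int probe_ipa_limit(int fd)
{
        return ioctl(fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
}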

◆ kvm_vm_ioctl_enable_cap()

int kvm_vm_ioctl_enable_cap ( struct kvm *  kvm,
struct kvm_enable_cap *  cap 
)

Definition at line 72 of file arm.c.

74 {
75  int r;
76  u64 new_cap;
77 
78  if (cap->flags)
79  return -EINVAL;
80 
81  switch (cap->cap) {
82  case KVM_CAP_ARM_NISV_TO_USER:
83  r = 0;
84  set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
85  &kvm->arch.flags);
86  break;
87  case KVM_CAP_ARM_MTE:
88  mutex_lock(&kvm->lock);
89  if (!system_supports_mte() || kvm->created_vcpus) {
90  r = -EINVAL;
91  } else {
92  r = 0;
93  set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
94  }
95  mutex_unlock(&kvm->lock);
96  break;
97  case KVM_CAP_ARM_SYSTEM_SUSPEND:
98  r = 0;
99  set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
100  break;
101  case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
102  new_cap = cap->args[0];
103 
104  mutex_lock(&kvm->slots_lock);
105  /*
106  * To keep things simple, allow changing the chunk
107  * size only when no memory slots have been created.
108  */
109  if (!kvm_are_all_memslots_empty(kvm)) {
110  r = -EINVAL;
111  } else if (new_cap && !kvm_is_block_size_supported(new_cap)) {
112  r = -EINVAL;
113  } else {
114  r = 0;
115  kvm->arch.mmu.split_page_chunk_size = new_cap;
116  }
117  mutex_unlock(&kvm->slots_lock);
118  break;
119  default:
120  r = -EINVAL;
121  break;
122  }
123 
124  return r;
125 }
bool kvm_are_all_memslots_empty(struct kvm *kvm)
Definition: kvm_main.c:4958
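
A corresponding userspace sketch (helper name assumed) that enables MTE before any vCPU exists, since the handler above rejects the capability once created_vcpus is non-zero:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int vm_enable_mte(int vm_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_ARM_MTE;  /* flags and args must stay zero */

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}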

◆ kvm_vm_ioctl_irq_line()

int kvm_vm_ioctl_irq_line ( struct kvm *  kvm,
struct kvm_irq_level *  irq_level,
bool  line_status 
)

Definition at line 1196 of file arm.c.

1198 {
1199  u32 irq = irq_level->irq;
1200  unsigned int irq_type, vcpu_id, irq_num;
1201  struct kvm_vcpu *vcpu = NULL;
1202  bool level = irq_level->level;
1203 
1204  irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
1205  vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
1206  vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
1207  irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
1208 
1209  trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);
1210 
1211  switch (irq_type) {
1212  case KVM_ARM_IRQ_TYPE_CPU:
1213  if (irqchip_in_kernel(kvm))
1214  return -ENXIO;
1215 
1216  vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1217  if (!vcpu)
1218  return -EINVAL;
1219 
1220  if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
1221  return -EINVAL;
1222 
1223  return vcpu_interrupt_line(vcpu, irq_num, level);
1224  case KVM_ARM_IRQ_TYPE_PPI:
1225  if (!irqchip_in_kernel(kvm))
1226  return -ENXIO;
1227 
1228  vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
1229  if (!vcpu)
1230  return -EINVAL;
1231 
1232  if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
1233  return -EINVAL;
1234 
1235  return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
1236  case KVM_ARM_IRQ_TYPE_SPI:
1237  if (!irqchip_in_kernel(kvm))
1238  return -ENXIO;
1239 
1240  if (irq_num < VGIC_NR_PRIVATE_IRQS)
1241  return -EINVAL;
1242 
1243  return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
1244  }
1245 
1246  return -EINVAL;
1247 }
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
Definition: arm.c:1162
#define VGIC_NR_PRIVATE_IRQS
Definition: arm_vgic.h:27
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned int intid, bool level, void *owner)
Definition: vgic.c:439
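
The irq word decoded above is packed by userspace from the type/vcpu/number fields with the uapi shift macros. A sketch for an SPI (helper name assumed; intid is assumed to be a valid SPI number, i.e. >= 32, and SPIs carry no vCPU field):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Assert (level = 1) or deassert (level = 0) an SPI on the in-kernel VGIC. */
static int set_spi(int vm_fd, unsigned int intid, unsigned int level)
{
        struct kvm_irq_level irq_level = {
                .irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
                       (intid << KVM_ARM_IRQ_NUM_SHIFT),
                .level = level,
        };

        return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}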

◆ kvm_vm_ioctl_set_device_addr()

static int kvm_vm_ioctl_set_device_addr ( struct kvm *  kvm,
struct kvm_arm_device_addr *  dev_addr 
)
static

Definition at line 1650 of file arm.c.

1652 {
1653  switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) {
1654  case KVM_ARM_DEVICE_VGIC_V2:
1655  if (!vgic_present)
1656  return -ENXIO;
1657  return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr);
1658  default:
1659  return -ENODEV;
1660  }
1661 }
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)

◆ kvm_vm_set_attr()

static int kvm_vm_set_attr ( struct kvm *  kvm,
struct kvm_device_attr *  attr 
)
static

Definition at line 1673 of file arm.c.

1674 {
1675  switch (attr->group) {
1676  case KVM_ARM_VM_SMCCC_CTRL:
1677  return kvm_vm_smccc_set_attr(kvm, attr);
1678  default:
1679  return -ENXIO;
1680  }
1681 }
int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
Definition: hypercalls.c:652

◆ lock_all_vcpus()

bool lock_all_vcpus ( struct kvm *  kvm)

Definition at line 1773 of file arm.c.

1774 {
1775  struct kvm_vcpu *tmp_vcpu;
1776  unsigned long c;
1777 
1778  lockdep_assert_held(&kvm->lock);
1779 
1780  /*
1781  * Any time a vcpu is in an ioctl (including running), the
1782  * core KVM code tries to grab the vcpu->mutex.
1783  *
1784  * By grabbing the vcpu->mutex of all VCPUs we ensure that no
1785  * other VCPUs can fiddle with the state while we access it.
1786  */
1787  kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
1788  if (!mutex_trylock(&tmp_vcpu->mutex)) {
1789  unlock_vcpus(kvm, c - 1);
1790  return false;
1791  }
1792  }
1793 
1794  return true;
1795 }
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
Definition: arm.c:1755
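
A hedged sketch of the expected calling pattern, not verbatim kernel code (the VGIC device code follows this shape):

mutex_lock(&kvm->lock);

if (!lock_all_vcpus(kvm)) {
        mutex_unlock(&kvm->lock);
        return -EBUSY;
}

/* ... mutate state that no vCPU may concurrently observe ... */

unlock_all_vcpus(kvm);
mutex_unlock(&kvm->lock);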

◆ module_init()

module_init ( kvm_arm_init  )

◆ nvhe_percpu_order()

static unsigned long nvhe_percpu_order ( void  )
static

Definition at line 1803 of file arm.c.

1804 {
1805  unsigned long size = nvhe_percpu_size();
1806 
1807  return size ? get_order(size) : 0;
1808 }
size_t size
Definition: gen-hyprel.c:133

◆ nvhe_percpu_size()

static unsigned long nvhe_percpu_size ( void  )
static

Definition at line 1797 of file arm.c.

1798 {
1799  return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
1800  (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
1801 }

◆ pkvm_hyp_init_ptrauth()

static void pkvm_hyp_init_ptrauth ( void  )
static

Definition at line 2277 of file arm.c.

2278 {
2279  struct kvm_cpu_context *hyp_ctxt;
2280  int cpu;
2281 
2282  for_each_possible_cpu(cpu) {
2283  hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu);
2284  hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long();
2285  hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long();
2286  hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long();
2287  hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long();
2288  hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long();
2289  hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long();
2290  hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long();
2291  hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long();
2292  hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long();
2293  hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long();
2294  }
2295 }

◆ system_supported_vcpu_features()

static unsigned long system_supported_vcpu_features ( void  )
static

Definition at line 1249 of file arm.c.

1250 {
1251  unsigned long features = KVM_VCPU_VALID_FEATURES;
1252 
1253  if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
1254  clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
1255 
1256  if (!kvm_arm_support_pmu_v3())
1257  clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
1258 
1259  if (!system_supports_sve())
1260  clear_bit(KVM_ARM_VCPU_SVE, &features);
1261 
1262  if (!system_has_full_ptr_auth()) {
1263  clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
1264  clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
1265  }
1266 
1267  if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
1268  clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);
1269 
1270  return features;
1271 }

◆ teardown_hyp_mode()

static void __init teardown_hyp_mode ( void  )
static

Definition at line 2187 of file arm.c.

2188 {
2189  int cpu;
2190 
2191  free_hyp_pgds();
2192  for_each_possible_cpu(cpu) {
2193  free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
2194  free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
2195  }
2196 }

◆ teardown_subsystems()

static void __init teardown_subsystems ( void  )
static

Definition at line 2181 of file arm.c.

2182 {
2183  kvm_unregister_perf_callbacks();
2184  hyp_cpu_pm_exit();
2185 }

◆ unlock_all_vcpus()

void unlock_all_vcpus ( struct kvm *  kvm)

Definition at line 1765 of file arm.c.

1766 {
1767  lockdep_assert_held(&kvm->lock);
1768 
1769  unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
1770 }

◆ unlock_vcpus()

static void unlock_vcpus ( struct kvm *  kvm,
int  vcpu_lock_idx 
)
static

Definition at line 1755 of file arm.c.

1756 {
1757  struct kvm_vcpu *tmp_vcpu;
1758 
1759  for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
1760  tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
1761  mutex_unlock(&tmp_vcpu->mutex);
1762  }
1763 }

◆ vcpu_interrupt_line()

static int vcpu_interrupt_line ( struct kvm_vcpu *  vcpu,
int  number,
bool  level 
)
static

Definition at line 1162 of file arm.c.

1163 {
1164  int bit_index;
1165  bool set;
1166  unsigned long *hcr;
1167 
1168  if (number == KVM_ARM_IRQ_CPU_IRQ)
1169  bit_index = __ffs(HCR_VI);
1170  else /* KVM_ARM_IRQ_CPU_FIQ */
1171  bit_index = __ffs(HCR_VF);
1172 
1173  hcr = vcpu_hcr(vcpu);
1174  if (level)
1175  set = test_and_set_bit(bit_index, hcr);
1176  else
1177  set = test_and_clear_bit(bit_index, hcr);
1178 
1179  /*
1180  * If we didn't change anything, no need to wake up or kick other CPUs
1181  */
1182  if (set == level)
1183  return 0;
1184 
1185  /*
1186  * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
1187  * trigger a world-switch round on the running physical CPU to set the
1188  * virtual IRQ/FIQ fields in the HCR appropriately.
1189  */
1190  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1191  kvm_vcpu_kick(vcpu);
1192 
1193  return 0;
1194 }

◆ vcpu_mode_is_bad_32bit()

static bool vcpu_mode_is_bad_32bit ( struct kvm_vcpu *  vcpu)
static

Definition at line 880 of file arm.c.

881 {
882  if (likely(!vcpu_mode_is_32bit(vcpu)))
883  return false;
884 
885  if (vcpu_has_nv(vcpu))
886  return true;
887 
888  return !kvm_supports_32bit_el0();
889 }

Variable Documentation

◆ hyp_spectre_vector_selector

void* hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]
static

Definition at line 1811 of file arm.c.

◆ kvm_arm_initialised

bool kvm_arm_initialised
static

Definition at line 57 of file arm.c.

◆ kvm_mode

enum kvm_mode kvm_mode = KVM_MODE_DEFAULT
static

Definition at line 1 of file arm.c.

◆ vgic_present

bool vgic_present
static

Definition at line 57 of file arm.c.