KVM
Classes | Macros | Functions
vgic.h File Reference
#include <linux/irqchip/arm-gic-common.h>
#include <asm/kvm_mmu.h>
Include dependency graph for vgic.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

struct  vgic_vmcr
 
struct  vgic_reg_attr
 

Macros

#define PRODUCT_ID_KVM   0x4b /* ASCII code K */
 
#define IMPLEMENTER_ARM   0x43b
 
#define VGIC_ADDR_UNDEF   (-1)
 
#define IS_VGIC_ADDR_UNDEF(_x)   ((_x) == VGIC_ADDR_UNDEF)
 
#define INTERRUPT_ID_BITS_SPIS   10
 
#define INTERRUPT_ID_BITS_ITS   16
 
#define VGIC_PRI_BITS   5
 
#define vgic_irq_is_sgi(intid)   ((intid) < VGIC_NR_SGIS)
 
#define VGIC_AFFINITY_0_SHIFT   0
 
#define VGIC_AFFINITY_0_MASK   (0xffUL << VGIC_AFFINITY_0_SHIFT)
 
#define VGIC_AFFINITY_1_SHIFT   8
 
#define VGIC_AFFINITY_1_MASK   (0xffUL << VGIC_AFFINITY_1_SHIFT)
 
#define VGIC_AFFINITY_2_SHIFT   16
 
#define VGIC_AFFINITY_2_MASK   (0xffUL << VGIC_AFFINITY_2_SHIFT)
 
#define VGIC_AFFINITY_3_SHIFT   24
 
#define VGIC_AFFINITY_3_MASK   (0xffUL << VGIC_AFFINITY_3_SHIFT)
 
#define VGIC_AFFINITY_LEVEL(reg, level)
 
#define VGIC_TO_MPIDR(val)
 
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000
 
#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT   14
 
#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800
 
#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT   11
 
#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780
 
#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT   7
 
#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078
 
#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT   3
 
#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007
 
#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT   0
 
#define KVM_DEV_ARM_VGIC_SYSREG_MASK
 
#define KVM_ITS_CTE_VALID_SHIFT   63
 
#define KVM_ITS_CTE_VALID_MASK   BIT_ULL(63)
 
#define KVM_ITS_CTE_RDBASE_SHIFT   16
 
#define KVM_ITS_CTE_ICID_MASK   GENMASK_ULL(15, 0)
 
#define KVM_ITS_ITE_NEXT_SHIFT   48
 
#define KVM_ITS_ITE_PINTID_SHIFT   16
 
#define KVM_ITS_ITE_PINTID_MASK   GENMASK_ULL(47, 16)
 
#define KVM_ITS_ITE_ICID_MASK   GENMASK_ULL(15, 0)
 
#define KVM_ITS_DTE_VALID_SHIFT   63
 
#define KVM_ITS_DTE_VALID_MASK   BIT_ULL(63)
 
#define KVM_ITS_DTE_NEXT_SHIFT   49
 
#define KVM_ITS_DTE_NEXT_MASK   GENMASK_ULL(62, 49)
 
#define KVM_ITS_DTE_ITTADDR_SHIFT   5
 
#define KVM_ITS_DTE_ITTADDR_MASK   GENMASK_ULL(48, 5)
 
#define KVM_ITS_DTE_SIZE_MASK   GENMASK_ULL(4, 0)
 
#define KVM_ITS_L1E_VALID_MASK   BIT_ULL(63)
 
#define KVM_ITS_L1E_ADDR_MASK   GENMASK_ULL(51, 16)
 
#define KVM_VGIC_V3_RDIST_INDEX_MASK   GENMASK_ULL(11, 0)
 
#define KVM_VGIC_V3_RDIST_FLAGS_MASK   GENMASK_ULL(15, 12)
 
#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT   12
 
#define KVM_VGIC_V3_RDIST_BASE_MASK   GENMASK_ULL(51, 16)
 
#define KVM_VGIC_V3_RDIST_COUNT_MASK   GENMASK_ULL(63, 52)
 
#define KVM_VGIC_V3_RDIST_COUNT_SHIFT   52
 
#define DEBUG_SPINLOCK_BUG_ON(p)
 

Functions

static u32 vgic_get_implementation_rev (struct kvm_vcpu *vcpu)
 
static bool irq_is_pending (struct vgic_irq *irq)
 
static bool vgic_irq_is_mapped_level (struct vgic_irq *irq)
 
static int vgic_irq_get_lr_count (struct vgic_irq *irq)
 
static bool vgic_irq_is_multi_sgi (struct vgic_irq *irq)
 
static int vgic_write_guest_lock (struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len)
 
int vgic_v3_parse_attr (struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr)
 
int vgic_v2_parse_attr (struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr)
 
const struct vgic_register_region *vgic_get_mmio_region (struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, int len)
 
struct vgic_irq *vgic_get_irq (struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
 
void __vgic_put_lpi_locked (struct kvm *kvm, struct vgic_irq *irq)
 
void vgic_put_irq (struct kvm *kvm, struct vgic_irq *irq)
 
bool vgic_get_phys_line_level (struct vgic_irq *irq)
 
void vgic_irq_set_phys_pending (struct vgic_irq *irq, bool pending)
 
void vgic_irq_set_phys_active (struct vgic_irq *irq, bool active)
 
bool vgic_queue_irq_unlock (struct kvm *kvm, struct vgic_irq *irq, unsigned long flags)
 
void vgic_kick_vcpus (struct kvm *kvm)
 
void vgic_irq_handle_resampling (struct vgic_irq *irq, bool lr_deactivated, bool lr_pending)
 
int vgic_check_iorange (struct kvm *kvm, phys_addr_t ioaddr, phys_addr_t addr, phys_addr_t alignment, phys_addr_t size)
 
void vgic_v2_fold_lr_state (struct kvm_vcpu *vcpu)
 
void vgic_v2_populate_lr (struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 
void vgic_v2_clear_lr (struct kvm_vcpu *vcpu, int lr)
 
void vgic_v2_set_underflow (struct kvm_vcpu *vcpu)
 
int vgic_v2_has_attr_regs (struct kvm_device *dev, struct kvm_device_attr *attr)
 
int vgic_v2_dist_uaccess (struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val)
 
int vgic_v2_cpuif_uaccess (struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val)
 
void vgic_v2_set_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
void vgic_v2_get_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
void vgic_v2_enable (struct kvm_vcpu *vcpu)
 
int vgic_v2_probe (const struct gic_kvm_info *info)
 
int vgic_v2_map_resources (struct kvm *kvm)
 
int vgic_register_dist_iodev (struct kvm *kvm, gpa_t dist_base_address, enum vgic_type)
 
void vgic_v2_init_lrs (void)
 
void vgic_v2_load (struct kvm_vcpu *vcpu)
 
void vgic_v2_put (struct kvm_vcpu *vcpu)
 
void vgic_v2_vmcr_sync (struct kvm_vcpu *vcpu)
 
void vgic_v2_save_state (struct kvm_vcpu *vcpu)
 
void vgic_v2_restore_state (struct kvm_vcpu *vcpu)
 
static void vgic_get_irq_kref (struct vgic_irq *irq)
 
void vgic_v3_fold_lr_state (struct kvm_vcpu *vcpu)
 
void vgic_v3_populate_lr (struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 
void vgic_v3_clear_lr (struct kvm_vcpu *vcpu, int lr)
 
void vgic_v3_set_underflow (struct kvm_vcpu *vcpu)
 
void vgic_v3_set_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
void vgic_v3_get_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
void vgic_v3_enable (struct kvm_vcpu *vcpu)
 
int vgic_v3_probe (const struct gic_kvm_info *info)
 
int vgic_v3_map_resources (struct kvm *kvm)
 
int vgic_v3_lpi_sync_pending_status (struct kvm *kvm, struct vgic_irq *irq)
 
int vgic_v3_save_pending_tables (struct kvm *kvm)
 
int vgic_v3_set_redist_base (struct kvm *kvm, u32 index, u64 addr, u32 count)
 
int vgic_register_redist_iodev (struct kvm_vcpu *vcpu)
 
void vgic_unregister_redist_iodev (struct kvm_vcpu *vcpu)
 
bool vgic_v3_check_base (struct kvm *kvm)
 
void vgic_v3_load (struct kvm_vcpu *vcpu)
 
void vgic_v3_put (struct kvm_vcpu *vcpu)
 
void vgic_v3_vmcr_sync (struct kvm_vcpu *vcpu)
 
bool vgic_has_its (struct kvm *kvm)
 
int kvm_vgic_register_its_device (void)
 
void vgic_enable_lpis (struct kvm_vcpu *vcpu)
 
void vgic_flush_pending_lpis (struct kvm_vcpu *vcpu)
 
int vgic_its_inject_msi (struct kvm *kvm, struct kvm_msi *msi)
 
int vgic_v3_has_attr_regs (struct kvm_device *dev, struct kvm_device_attr *attr)
 
int vgic_v3_dist_uaccess (struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val)
 
int vgic_v3_redist_uaccess (struct kvm_vcpu *vcpu, bool is_write, int offset, u32 *val)
 
int vgic_v3_cpu_sysregs_uaccess (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr, bool is_write)
 
int vgic_v3_has_cpu_sysregs_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
int vgic_v3_line_level_info_uaccess (struct kvm_vcpu *vcpu, bool is_write, u32 intid, u32 *val)
 
int kvm_register_vgic_device (unsigned long type)
 
void vgic_set_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
void vgic_get_vmcr (struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
 
int vgic_lazy_init (struct kvm *kvm)
 
int vgic_init (struct kvm *kvm)
 
void vgic_debug_init (struct kvm *kvm)
 
void vgic_debug_destroy (struct kvm *kvm)
 
static int vgic_v3_max_apr_idx (struct kvm_vcpu *vcpu)
 
static bool vgic_v3_redist_region_full (struct vgic_redist_region *region)
 
struct vgic_redist_region *vgic_v3_rdist_free_slot (struct list_head *rdregs)
 
static size_t vgic_v3_rd_region_size (struct kvm *kvm, struct vgic_redist_region *rdreg)
 
struct vgic_redist_region *vgic_v3_rdist_region_from_index (struct kvm *kvm, u32 index)
 
void vgic_v3_free_redist_region (struct vgic_redist_region *rdreg)
 
bool vgic_v3_rdist_overlap (struct kvm *kvm, gpa_t base, size_t size)
 
static bool vgic_dist_overlap (struct kvm *kvm, gpa_t base, size_t size)
 
bool vgic_lpis_enabled (struct kvm_vcpu *vcpu)
 
int vgic_copy_lpi_list (struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
 
int vgic_its_resolve_lpi (struct kvm *kvm, struct vgic_its *its, u32 devid, u32 eventid, struct vgic_irq **irq)
 
struct vgic_its *vgic_msi_to_its (struct kvm *kvm, struct kvm_msi *msi)
 
int vgic_its_inject_cached_translation (struct kvm *kvm, struct kvm_msi *msi)
 
void vgic_lpi_translation_cache_init (struct kvm *kvm)
 
void vgic_lpi_translation_cache_destroy (struct kvm *kvm)
 
void vgic_its_invalidate_cache (struct kvm *kvm)
 
int vgic_its_inv_lpi (struct kvm *kvm, struct vgic_irq *irq)
 
int vgic_its_invall (struct kvm_vcpu *vcpu)
 
bool vgic_supports_direct_msis (struct kvm *kvm)
 
int vgic_v4_init (struct kvm *kvm)
 
void vgic_v4_teardown (struct kvm *kvm)
 
void vgic_v4_configure_vsgis (struct kvm *kvm)
 
void vgic_v4_get_vlpi_state (struct vgic_irq *irq, bool *val)
 
int vgic_v4_request_vpe_irq (struct kvm_vcpu *vcpu, int irq)
 

Macro Definition Documentation

◆ DEBUG_SPINLOCK_BUG_ON

#define DEBUG_SPINLOCK_BUG_ON (   p)

Definition at line 99 of file vgic.h.

◆ IMPLEMENTER_ARM

#define IMPLEMENTER_ARM   0x43b

Definition at line 12 of file vgic.h.

◆ INTERRUPT_ID_BITS_ITS

#define INTERRUPT_ID_BITS_ITS   16

Definition at line 18 of file vgic.h.

◆ INTERRUPT_ID_BITS_SPIS

#define INTERRUPT_ID_BITS_SPIS   10

Definition at line 17 of file vgic.h.

◆ IS_VGIC_ADDR_UNDEF

#define IS_VGIC_ADDR_UNDEF (   _x)    ((_x) == VGIC_ADDR_UNDEF)

Definition at line 15 of file vgic.h.

◆ KVM_DEV_ARM_VGIC_SYSREG_MASK

#define KVM_DEV_ARM_VGIC_SYSREG_MASK
Value:
(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)

Definition at line 60 of file vgic.h.

◆ KVM_ITS_CTE_ICID_MASK

#define KVM_ITS_CTE_ICID_MASK   GENMASK_ULL(15, 0)

Definition at line 73 of file vgic.h.

◆ KVM_ITS_CTE_RDBASE_SHIFT

#define KVM_ITS_CTE_RDBASE_SHIFT   16

Definition at line 72 of file vgic.h.

◆ KVM_ITS_CTE_VALID_MASK

#define KVM_ITS_CTE_VALID_MASK   BIT_ULL(63)

Definition at line 71 of file vgic.h.

◆ KVM_ITS_CTE_VALID_SHIFT

#define KVM_ITS_CTE_VALID_SHIFT   63

Definition at line 70 of file vgic.h.

◆ KVM_ITS_DTE_ITTADDR_MASK

#define KVM_ITS_DTE_ITTADDR_MASK   GENMASK_ULL(48, 5)

Definition at line 83 of file vgic.h.

◆ KVM_ITS_DTE_ITTADDR_SHIFT

#define KVM_ITS_DTE_ITTADDR_SHIFT   5

Definition at line 82 of file vgic.h.

◆ KVM_ITS_DTE_NEXT_MASK

#define KVM_ITS_DTE_NEXT_MASK   GENMASK_ULL(62, 49)

Definition at line 81 of file vgic.h.

◆ KVM_ITS_DTE_NEXT_SHIFT

#define KVM_ITS_DTE_NEXT_SHIFT   49

Definition at line 80 of file vgic.h.

◆ KVM_ITS_DTE_SIZE_MASK

#define KVM_ITS_DTE_SIZE_MASK   GENMASK_ULL(4, 0)

Definition at line 84 of file vgic.h.

◆ KVM_ITS_DTE_VALID_MASK

#define KVM_ITS_DTE_VALID_MASK   BIT_ULL(63)

Definition at line 79 of file vgic.h.

◆ KVM_ITS_DTE_VALID_SHIFT

#define KVM_ITS_DTE_VALID_SHIFT   63

Definition at line 78 of file vgic.h.

◆ KVM_ITS_ITE_ICID_MASK

#define KVM_ITS_ITE_ICID_MASK   GENMASK_ULL(15, 0)

Definition at line 77 of file vgic.h.

◆ KVM_ITS_ITE_NEXT_SHIFT

#define KVM_ITS_ITE_NEXT_SHIFT   48

Definition at line 74 of file vgic.h.

◆ KVM_ITS_ITE_PINTID_MASK

#define KVM_ITS_ITE_PINTID_MASK   GENMASK_ULL(47, 16)

Definition at line 76 of file vgic.h.

◆ KVM_ITS_ITE_PINTID_SHIFT

#define KVM_ITS_ITE_PINTID_SHIFT   16

Definition at line 75 of file vgic.h.

◆ KVM_ITS_L1E_ADDR_MASK

#define KVM_ITS_L1E_ADDR_MASK   GENMASK_ULL(51, 16)

Definition at line 87 of file vgic.h.

◆ KVM_ITS_L1E_VALID_MASK

#define KVM_ITS_L1E_VALID_MASK   BIT_ULL(63)

Definition at line 85 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_CRM_MASK

#define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078

Definition at line 55 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT

#define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT   3

Definition at line 56 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_CRN_MASK

#define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780

Definition at line 53 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT

#define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT   7

Definition at line 54 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP0_MASK

#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000

Definition at line 49 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT

#define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT   14

Definition at line 50 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP1_MASK

#define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800

Definition at line 51 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT

#define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT   11

Definition at line 52 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP2_MASK

#define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007

Definition at line 57 of file vgic.h.

◆ KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT

#define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT   0

Definition at line 58 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_BASE_MASK

#define KVM_VGIC_V3_RDIST_BASE_MASK   GENMASK_ULL(51, 16)

Definition at line 92 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_COUNT_MASK

#define KVM_VGIC_V3_RDIST_COUNT_MASK   GENMASK_ULL(63, 52)

Definition at line 93 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_COUNT_SHIFT

#define KVM_VGIC_V3_RDIST_COUNT_SHIFT   52

Definition at line 94 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_FLAGS_MASK

#define KVM_VGIC_V3_RDIST_FLAGS_MASK   GENMASK_ULL(15, 12)

Definition at line 90 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_FLAGS_SHIFT

#define KVM_VGIC_V3_RDIST_FLAGS_SHIFT   12

Definition at line 91 of file vgic.h.

◆ KVM_VGIC_V3_RDIST_INDEX_MASK

#define KVM_VGIC_V3_RDIST_INDEX_MASK   GENMASK_ULL(11, 0)

Definition at line 89 of file vgic.h.

◆ PRODUCT_ID_KVM

#define PRODUCT_ID_KVM   0x4b /* ASCII code K */

Definition at line 11 of file vgic.h.

◆ VGIC_ADDR_UNDEF

#define VGIC_ADDR_UNDEF   (-1)

Definition at line 14 of file vgic.h.

◆ VGIC_AFFINITY_0_MASK

#define VGIC_AFFINITY_0_MASK   (0xffUL << VGIC_AFFINITY_0_SHIFT)

Definition at line 24 of file vgic.h.

◆ VGIC_AFFINITY_0_SHIFT

#define VGIC_AFFINITY_0_SHIFT   0

Definition at line 23 of file vgic.h.

◆ VGIC_AFFINITY_1_MASK

#define VGIC_AFFINITY_1_MASK   (0xffUL << VGIC_AFFINITY_1_SHIFT)

Definition at line 26 of file vgic.h.

◆ VGIC_AFFINITY_1_SHIFT

#define VGIC_AFFINITY_1_SHIFT   8

Definition at line 25 of file vgic.h.

◆ VGIC_AFFINITY_2_MASK

#define VGIC_AFFINITY_2_MASK   (0xffUL << VGIC_AFFINITY_2_SHIFT)

Definition at line 28 of file vgic.h.

◆ VGIC_AFFINITY_2_SHIFT

#define VGIC_AFFINITY_2_SHIFT   16

Definition at line 27 of file vgic.h.

◆ VGIC_AFFINITY_3_MASK

#define VGIC_AFFINITY_3_MASK   (0xffUL << VGIC_AFFINITY_3_SHIFT)

Definition at line 30 of file vgic.h.

◆ VGIC_AFFINITY_3_SHIFT

#define VGIC_AFFINITY_3_SHIFT   24

Definition at line 29 of file vgic.h.

◆ VGIC_AFFINITY_LEVEL

#define VGIC_AFFINITY_LEVEL (   reg,
  level 
)
Value:
((((reg) & VGIC_AFFINITY_## level ##_MASK) \
>> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

Definition at line 32 of file vgic.h.

◆ vgic_irq_is_sgi

#define vgic_irq_is_sgi (   intid)    ((intid) < VGIC_NR_SGIS)

Definition at line 21 of file vgic.h.

◆ VGIC_PRI_BITS

#define VGIC_PRI_BITS   5

Definition at line 19 of file vgic.h.

◆ VGIC_TO_MPIDR

#define VGIC_TO_MPIDR (   val)
Value:
(VGIC_AFFINITY_LEVEL(val, 0) | \
VGIC_AFFINITY_LEVEL(val, 1) | \
VGIC_AFFINITY_LEVEL(val, 2) | \
VGIC_AFFINITY_LEVEL(val, 3))
#define VGIC_AFFINITY_LEVEL(reg, level)
Definition: vgic.h:32

Definition at line 40 of file vgic.h.

Function Documentation

◆ __vgic_put_lpi_locked()

void __vgic_put_lpi_locked ( struct kvm *  kvm,
struct vgic_irq *irq 
)

Definition at line 126 of file vgic.c.

127 {
128  struct vgic_dist *dist = &kvm->arch.vgic;
129 
130  if (!kref_put(&irq->refcount, vgic_irq_release))
131  return;
132 
133  list_del(&irq->lpi_list);
134  dist->lpi_list_count--;
135 
136  kfree(irq);
137 }
int lpi_list_count
Definition: arm_vgic.h:279
struct list_head lpi_list
Definition: arm_vgic.h:119
struct kref refcount
Definition: arm_vgic.h:141
static void vgic_irq_release(struct kref *ref)
Definition: vgic.c:119
Here is the call graph for this function:
Here is the caller graph for this function:

◆ irq_is_pending()

static bool irq_is_pending ( struct vgic_irq *irq)
inlinestatic

Definition at line 108 of file vgic.h.

109 {
110  if (irq->config == VGIC_CONFIG_EDGE)
111  return irq->pending_latch;
112  else
113  return irq->pending_latch || irq->line_level;
114 }
@ VGIC_CONFIG_EDGE
Definition: arm_vgic.h:93
bool line_level
Definition: arm_vgic.h:134
bool pending_latch
Definition: arm_vgic.h:135
enum vgic_irq_config config
Definition: arm_vgic.h:152
Here is the caller graph for this function:

◆ kvm_register_vgic_device()

int kvm_register_vgic_device ( unsigned long  type)

Definition at line 316 of file vgic-kvm-device.c.

317 {
318  int ret = -ENODEV;
319 
320  switch (type) {
321  case KVM_DEV_TYPE_ARM_VGIC_V2:
323  KVM_DEV_TYPE_ARM_VGIC_V2);
324  break;
325  case KVM_DEV_TYPE_ARM_VGIC_V3:
327  KVM_DEV_TYPE_ARM_VGIC_V3);
328 
329  if (ret)
330  break;
332  break;
333  }
334 
335  return ret;
336 }
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
Definition: kvm_main.c:4742
int kvm_vgic_register_its_device(void)
Definition: vgic-its.c:2914
struct kvm_device_ops kvm_arm_vgic_v2_ops
struct kvm_device_ops kvm_arm_vgic_v3_ops
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_register_its_device()

int kvm_vgic_register_its_device ( void  )

Definition at line 2914 of file vgic-its.c.

2915 {
2917  KVM_DEV_TYPE_ARM_VGIC_ITS);
2918 }
static struct kvm_device_ops kvm_arm_vgic_its_ops
Definition: vgic-its.c:2905
Here is the caller graph for this function:

◆ vgic_check_iorange()

int vgic_check_iorange ( struct kvm *  kvm,
phys_addr_t  ioaddr,
phys_addr_t  addr,
phys_addr_t  alignment,
phys_addr_t  size 
)

Definition at line 17 of file vgic-kvm-device.c.

20 {
21  if (!IS_VGIC_ADDR_UNDEF(ioaddr))
22  return -EEXIST;
23 
24  if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
25  return -EINVAL;
26 
27  if (addr + size < addr)
28  return -EINVAL;
29 
30  if (addr & ~kvm_phys_mask(&kvm->arch.mmu) ||
31  (addr + size) > kvm_phys_size(&kvm->arch.mmu))
32  return -E2BIG;
33 
34  return 0;
35 }
size_t size
Definition: gen-hyprel.c:133
#define IS_VGIC_ADDR_UNDEF(_x)
Definition: vgic.h:15
Here is the caller graph for this function:

◆ vgic_copy_lpi_list()

int vgic_copy_lpi_list ( struct kvm *  kvm,
struct kvm_vcpu *  vcpu,
u32 **  intid_ptr 
)

Definition at line 319 of file vgic-its.c.

320 {
321  struct vgic_dist *dist = &kvm->arch.vgic;
322  struct vgic_irq *irq;
323  unsigned long flags;
324  u32 *intids;
325  int irq_count, i = 0;
326 
327  /*
328  * There is an obvious race between allocating the array and LPIs
329  * being mapped/unmapped. If we ended up here as a result of a
330  * command, we're safe (locks are held, preventing another
331  * command). If coming from another path (such as enabling LPIs),
332  * we must be careful not to overrun the array.
333  */
334  irq_count = READ_ONCE(dist->lpi_list_count);
335  intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
336  if (!intids)
337  return -ENOMEM;
338 
339  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
340  list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
341  if (i == irq_count)
342  break;
343  /* We don't need to "get" the IRQ, as we hold the list lock. */
344  if (vcpu && irq->target_vcpu != vcpu)
345  continue;
346  intids[i++] = irq->intid;
347  }
348  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
349 
350  *intid_ptr = intids;
351  return i;
352 }
struct list_head lpi_list_head
Definition: arm_vgic.h:278
raw_spinlock_t lpi_list_lock
Definition: arm_vgic.h:277
u32 intid
Definition: arm_vgic.h:133
struct kvm_vcpu * vcpu
Definition: arm_vgic.h:122
struct kvm_vcpu * target_vcpu
Definition: arm_vgic.h:127
Here is the caller graph for this function:

◆ vgic_debug_destroy()

void vgic_debug_destroy ( struct kvm *  kvm)

Definition at line 278 of file vgic-debug.c.

279 {
280 }
Here is the caller graph for this function:

◆ vgic_debug_init()

void vgic_debug_init ( struct kvm *  kvm)

Definition at line 272 of file vgic-debug.c.

273 {
274  debugfs_create_file("vgic-state", 0444, kvm->debugfs_dentry, kvm,
275  &vgic_debug_fops);
276 }
Here is the caller graph for this function:

◆ vgic_dist_overlap()

static bool vgic_dist_overlap ( struct kvm *  kvm,
gpa_t  base,
size_t  size 
)
inlinestatic

Definition at line 317 of file vgic.h.

318 {
319  struct vgic_dist *d = &kvm->arch.vgic;
320 
321  return (base + size > d->vgic_dist_base) &&
322  (base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
323 }
static unsigned long base
Definition: early_alloc.c:15
gpa_t vgic_dist_base
Definition: arm_vgic.h:247
Here is the caller graph for this function:

◆ vgic_enable_lpis()

void vgic_enable_lpis ( struct kvm_vcpu *  vcpu)

Definition at line 1866 of file vgic-its.c.

1867 {
1868  if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1870 }
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
Definition: vgic-its.c:434
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_flush_pending_lpis()

void vgic_flush_pending_lpis ( struct kvm_vcpu *  vcpu)

Definition at line 152 of file vgic.c.

153 {
154  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
155  struct vgic_irq *irq, *tmp;
156  unsigned long flags;
157 
158  raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
159 
160  list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
161  if (irq->intid >= VGIC_MIN_LPI) {
162  raw_spin_lock(&irq->irq_lock);
163  list_del(&irq->ap_list);
164  irq->vcpu = NULL;
165  raw_spin_unlock(&irq->irq_lock);
166  vgic_put_irq(vcpu->kvm, irq);
167  }
168  }
169 
170  raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
171 }
#define VGIC_MIN_LPI
Definition: arm_vgic.h:31
raw_spinlock_t ap_list_lock
Definition: arm_vgic.h:334
struct list_head ap_list_head
Definition: arm_vgic.h:342
struct list_head ap_list
Definition: arm_vgic.h:120
raw_spinlock_t irq_lock
Definition: arm_vgic.h:118
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
Definition: vgic.c:139
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_implementation_rev()

static u32 vgic_get_implementation_rev ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 102 of file vgic.h.

103 {
104  return vcpu->kvm->arch.vgic.implementation_rev;
105 }
Here is the caller graph for this function:

◆ vgic_get_irq()

struct vgic_irq* vgic_get_irq ( struct kvm *  kvm,
struct kvm_vcpu *  vcpu,
u32  intid 
)

Definition at line 92 of file vgic.c.

94 {
95  /* SGIs and PPIs */
96  if (intid <= VGIC_MAX_PRIVATE) {
97  intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
98  return &vcpu->arch.vgic_cpu.private_irqs[intid];
99  }
100 
101  /* SPIs */
102  if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
103  intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
104  return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
105  }
106 
107  /* LPIs */
108  if (intid >= VGIC_MIN_LPI)
109  return vgic_get_lpi(kvm, intid);
110 
111  return NULL;
112 }
#define VGIC_MAX_PRIVATE
Definition: arm_vgic.h:28
#define VGIC_NR_PRIVATE_IRQS
Definition: arm_vgic.h:27
static struct vgic_irq * vgic_get_lpi(struct kvm *kvm, u32 intid)
Definition: vgic.c:60
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_irq_kref()

static void vgic_get_irq_kref ( struct vgic_irq irq)
inlinestatic

Definition at line 223 of file vgic.h.

224 {
225  if (irq->intid < VGIC_MIN_LPI)
226  return;
227 
228  kref_get(&irq->refcount);
229 }
Here is the caller graph for this function:

◆ vgic_get_mmio_region()

const struct vgic_register_region* vgic_get_mmio_region ( struct kvm_vcpu *  vcpu,
struct vgic_io_device *iodev,
gpa_t  addr,
int  len 
)

Definition at line 950 of file vgic-mmio.c.

952 {
953  const struct vgic_register_region *region;
954 
955  region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
956  addr - iodev->base_addr);
957  if (!region || !check_region(vcpu->kvm, region, addr, len))
958  return NULL;
959 
960  return region;
961 }
const struct vgic_register_region * regions
Definition: arm_vgic.h:181
gpa_t base_addr
Definition: arm_vgic.h:176
unsigned int len
Definition: vgic-mmio.h:10
static bool check_region(const struct kvm *kvm, const struct vgic_register_region *region, gpa_t addr, int len)
Definition: vgic-mmio.c:918
const struct vgic_register_region * vgic_find_mmio_region(const struct vgic_register_region *regions, int nr_regions, unsigned int offset)
Definition: vgic-mmio.c:836
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_phys_line_level()

bool vgic_get_phys_line_level ( struct vgic_irq *irq)

Definition at line 180 of file vgic.c.

181 {
182  bool line_level;
183 
184  BUG_ON(!irq->hw);
185 
186  if (irq->ops && irq->ops->get_input_level)
187  return irq->ops->get_input_level(irq->intid);
188 
189  WARN_ON(irq_get_irqchip_state(irq->host_irq,
190  IRQCHIP_STATE_PENDING,
191  &line_level));
192  return line_level;
193 }
bool(* get_input_level)(int vintid)
Definition: arm_vgic.h:114
struct irq_ops * ops
Definition: arm_vgic.h:154
unsigned int host_irq
Definition: arm_vgic.h:143
bool hw
Definition: arm_vgic.h:140
Here is the caller graph for this function:

◆ vgic_get_vmcr()

void vgic_get_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr vmcr 
)

Definition at line 851 of file vgic-mmio.c.

852 {
854  vgic_v2_get_vmcr(vcpu, vmcr);
855  else
856  vgic_v3_get_vmcr(vcpu, vmcr);
857 }
struct vgic_global kvm_vgic_global_state
@ VGIC_V2
Definition: arm_vgic.h:39
enum vgic_type type
Definition: arm_vgic.h:46
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
Definition: vgic-v2.c:232
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
Definition: vgic-v3.c:224
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_has_its()

bool vgic_has_its ( struct kvm *  kvm)

Definition at line 41 of file vgic-mmio-v3.c.

42 {
43  struct vgic_dist *dist = &kvm->arch.vgic;
44 
45  if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
46  return false;
47 
48  return dist->has_its;
49 }
bool has_its
Definition: arm_vgic.h:265
u32 vgic_model
Definition: arm_vgic.h:230
Here is the caller graph for this function:

◆ vgic_init()

int vgic_init ( struct kvm *  kvm)

Definition at line 262 of file vgic-init.c.

263 {
264  struct vgic_dist *dist = &kvm->arch.vgic;
265  struct kvm_vcpu *vcpu;
266  int ret = 0, i;
267  unsigned long idx;
268 
269  lockdep_assert_held(&kvm->arch.config_lock);
270 
271  if (vgic_initialized(kvm))
272  return 0;
273 
274  /* Are we also in the middle of creating a VCPU? */
275  if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
276  return -EBUSY;
277 
278  /* freeze the number of spis */
279  if (!dist->nr_spis)
281 
282  ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
283  if (ret)
284  goto out;
285 
286  /* Initialize groups on CPUs created before the VGIC type was known */
287  kvm_for_each_vcpu(idx, vcpu, kvm) {
288  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
289 
290  for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
291  struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
292  switch (dist->vgic_model) {
293  case KVM_DEV_TYPE_ARM_VGIC_V3:
294  irq->group = 1;
295  irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
296  break;
297  case KVM_DEV_TYPE_ARM_VGIC_V2:
298  irq->group = 0;
299  irq->targets = 1U << idx;
300  break;
301  default:
302  ret = -EINVAL;
303  goto out;
304  }
305  }
306  }
307 
308  if (vgic_has_its(kvm))
310 
311  /*
312  * If we have GICv4.1 enabled, unconditionnaly request enable the
313  * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
314  * enable it if we present a virtual ITS to the guest.
315  */
316  if (vgic_supports_direct_msis(kvm)) {
317  ret = vgic_v4_init(kvm);
318  if (ret)
319  goto out;
320  }
321 
322  kvm_for_each_vcpu(idx, vcpu, kvm)
324 
326  if (ret)
327  goto out;
328 
329  vgic_debug_init(kvm);
330 
331  /*
332  * If userspace didn't set the GIC implementation revision,
333  * default to the latest and greatest. You know want it.
334  */
335  if (!dist->implementation_rev)
337  dist->initialized = true;
338 
339 out:
340  return ret;
341 }
#define VGIC_NR_IRQS_LEGACY
Definition: arm_vgic.h:24
#define KVM_VGIC_IMP_REV_LATEST
Definition: arm_vgic.h:236
#define vgic_initialized(k)
Definition: arm_vgic.h:393
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]
Definition: arm_vgic.h:332
int nr_spis
Definition: arm_vgic.h:244
u32 implementation_rev
Definition: arm_vgic.h:233
bool initialized
Definition: arm_vgic.h:227
u32 mpidr
Definition: arm_vgic.h:146
u8 targets
Definition: arm_vgic.h:145
u8 group
Definition: arm_vgic.h:151
void vgic_debug_init(struct kvm *kvm)
Definition: vgic-debug.c:272
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
Definition: vgic-init.c:140
static void kvm_vgic_vcpu_enable(struct kvm_vcpu *vcpu)
Definition: vgic-init.c:245
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
Definition: vgic-irqfd.c:135
void vgic_lpi_translation_cache_init(struct kvm *kvm)
Definition: vgic-its.c:1903
bool vgic_has_its(struct kvm *kvm)
Definition: vgic-mmio-v3.c:41
bool vgic_supports_direct_msis(struct kvm *kvm)
Definition: vgic-mmio-v3.c:51
int vgic_v4_init(struct kvm *kvm)
Definition: vgic-v4.c:239
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_get_lr_count()

static int vgic_irq_get_lr_count ( struct vgic_irq irq)
inlinestatic

Definition at line 121 of file vgic.h.

122 {
123  /* Account for the active state as an interrupt */
124  if (vgic_irq_is_sgi(irq->intid) && irq->source)
125  return hweight8(irq->source) + irq->active;
126 
127  return irq_is_pending(irq) || irq->active;
128 }
bool active
Definition: arm_vgic.h:138
u8 source
Definition: arm_vgic.h:148
static bool irq_is_pending(struct vgic_irq *irq)
Definition: vgic.h:108
#define vgic_irq_is_sgi(intid)
Definition: vgic.h:21
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_handle_resampling()

void vgic_irq_handle_resampling ( struct vgic_irq irq,
bool  lr_deactivated,
bool  lr_pending 
)

Definition at line 1060 of file vgic.c.

1062 {
1063  if (vgic_irq_is_mapped_level(irq)) {
1064  bool resample = false;
1065 
1066  if (unlikely(vgic_irq_needs_resampling(irq))) {
1067  resample = !(irq->active || irq->pending_latch);
1068  } else if (lr_pending || (lr_deactivated && irq->line_level)) {
1070  resample = !irq->line_level;
1071  }
1072 
1073  if (resample)
1074  vgic_irq_set_phys_active(irq, false);
1075  }
1076 }
static bool vgic_irq_needs_resampling(struct vgic_irq *irq)
Definition: arm_vgic.h:160
bool vgic_get_phys_line_level(struct vgic_irq *irq)
Definition: vgic.c:180
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
Definition: vgic.c:196
static bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
Definition: vgic.h:116
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_is_mapped_level()

static bool vgic_irq_is_mapped_level ( struct vgic_irq irq)
inlinestatic

Definition at line 116 of file vgic.h.

117 {
118  return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
119 }
@ VGIC_CONFIG_LEVEL
Definition: arm_vgic.h:94
Here is the caller graph for this function:

◆ vgic_irq_is_multi_sgi()

static bool vgic_irq_is_multi_sgi ( struct vgic_irq irq)
inlinestatic

Definition at line 130 of file vgic.h.

131 {
132  return vgic_irq_get_lr_count(irq) > 1;
133 }
static int vgic_irq_get_lr_count(struct vgic_irq *irq)
Definition: vgic.h:121
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_set_phys_active()

void vgic_irq_set_phys_active ( struct vgic_irq irq,
bool  active 
)

Definition at line 196 of file vgic.c.

197 {
198 
199  BUG_ON(!irq->hw);
200  WARN_ON(irq_set_irqchip_state(irq->host_irq,
201  IRQCHIP_STATE_ACTIVE,
202  active));
203 }
Here is the caller graph for this function:

◆ vgic_irq_set_phys_pending()

void vgic_irq_set_phys_pending ( struct vgic_irq irq,
bool  pending 
)

Definition at line 173 of file vgic.c.

174 {
175  WARN_ON(irq_set_irqchip_state(irq->host_irq,
176  IRQCHIP_STATE_PENDING,
177  pending));
178 }
Here is the caller graph for this function:

◆ vgic_its_inject_cached_translation()

int vgic_its_inject_cached_translation ( struct kvm *  kvm,
struct kvm_msi *  msi 
)

Definition at line 765 of file vgic-its.c.

766 {
767  struct vgic_irq *irq;
768  unsigned long flags;
769  phys_addr_t db;
770 
771  db = (u64)msi->address_hi << 32 | msi->address_lo;
772  irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
773  if (!irq)
774  return -EWOULDBLOCK;
775 
776  raw_spin_lock_irqsave(&irq->irq_lock, flags);
777  irq->pending_latch = true;
778  vgic_queue_irq_unlock(kvm, irq, flags);
779  vgic_put_irq(kvm, irq);
780 
781  return 0;
782 }
static struct vgic_irq * vgic_its_check_cache(struct kvm *kvm, phys_addr_t db, u32 devid, u32 eventid)
Definition: vgic-its.c:588
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, unsigned long flags)
Definition: vgic.c:336
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_its_inject_msi()

int vgic_its_inject_msi ( struct kvm *  kvm,
struct kvm_msi *  msi 
)

Definition at line 790 of file vgic-its.c.

791 {
792  struct vgic_its *its;
793  int ret;
794 
795  if (!vgic_its_inject_cached_translation(kvm, msi))
796  return 1;
797 
798  its = vgic_msi_to_its(kvm, msi);
799  if (IS_ERR(its))
800  return PTR_ERR(its);
801 
802  mutex_lock(&its->its_lock);
803  ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
804  mutex_unlock(&its->its_lock);
805 
806  if (ret < 0)
807  return ret;
808 
809  /*
810  * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
811  * if the guest has blocked the MSI. So we map any LPI mapping
812  * related error to that.
813  */
814  if (ret)
815  return 0;
816  else
817  return 1;
818 }
struct mutex its_lock
Definition: arm_vgic.h:209
int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
Definition: vgic-its.c:765
struct vgic_its * vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
Definition: vgic-its.c:708
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, u32 devid, u32 eventid)
Definition: vgic-its.c:743
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_its_inv_lpi()

int vgic_its_inv_lpi ( struct kvm *  kvm,
struct vgic_irq irq 
)

Definition at line 1323 of file vgic-its.c.

1324 {
1325  return update_lpi_config(kvm, irq, NULL, true);
1326 }
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, struct kvm_vcpu *filter_vcpu, bool needs_inv)
Definition: vgic-its.c:280
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_its_invalidate_cache()

void vgic_its_invalidate_cache ( struct kvm *  kvm)

Definition at line 659 of file vgic-its.c.

660 {
661  struct vgic_dist *dist = &kvm->arch.vgic;
662  struct vgic_translation_cache_entry *cte;
663  unsigned long flags;
664 
665  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
666 
667  list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
668  /*
669  * If we hit a NULL entry, there is nothing after this
670  * point.
671  */
672  if (!cte->irq)
673  break;
674 
675  __vgic_put_lpi_locked(kvm, cte->irq);
676  cte->irq = NULL;
677  }
678 
679  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
680 }
struct list_head lpi_translation_cache
Definition: arm_vgic.h:282
Definition: vgic-its.c:148
struct list_head entry
Definition: vgic-its.c:149
struct vgic_irq * irq
Definition: vgic-its.c:153
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
Definition: vgic.c:126
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_its_invall()

int vgic_its_invall ( struct kvm_vcpu *  vcpu)

vgic_its_invall - invalidate all LPIs targeting a given vcpu @vcpu: the vcpu for which the RD is targeted by an invalidation

Contrary to the INVALL command, this targets a RD instead of a collection, and we don't need to hold the its_lock, since no ITS is involved here.

Definition at line 1355 of file vgic-its.c.

1356 {
1357  struct kvm *kvm = vcpu->kvm;
1358  int irq_count, i = 0;
1359  u32 *intids;
1360 
1361  irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1362  if (irq_count < 0)
1363  return irq_count;
1364 
1365  for (i = 0; i < irq_count; i++) {
1366  struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intids[i]);
1367  if (!irq)
1368  continue;
1369  update_lpi_config(kvm, irq, vcpu, false);
1370  vgic_put_irq(kvm, irq);
1371  }
1372 
1373  kfree(intids);
1374 
1375  if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1376  its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1377 
1378  return 0;
1379 }
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
Definition: vgic-its.c:319
struct vgic_irq * vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
Definition: vgic.c:92
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_its_resolve_lpi()

int vgic_its_resolve_lpi ( struct kvm *  kvm,
struct vgic_its its,
u32  devid,
u32  eventid,
struct vgic_irq **  irq 
)

Definition at line 682 of file vgic-its.c.

684 {
685  struct kvm_vcpu *vcpu;
686  struct its_ite *ite;
687 
688  if (!its->enabled)
689  return -EBUSY;
690 
691  ite = find_ite(its, devid, eventid);
692  if (!ite || !its_is_collection_mapped(ite->collection))
693  return E_ITS_INT_UNMAPPED_INTERRUPT;
694 
695  vcpu = collection_to_vcpu(kvm, ite->collection);
696  if (!vcpu)
697  return E_ITS_INT_UNMAPPED_INTERRUPT;
698 
699  if (!vgic_lpis_enabled(vcpu))
700  return -EBUSY;
701 
702  vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
703 
704  *irq = ite->irq;
705  return 0;
706 }
struct vgic_irq * irq
Definition: vgic-its.c:143
struct its_collection * collection
Definition: vgic-its.c:144
bool enabled
Definition: arm_vgic.h:191
static struct kvm_vcpu * collection_to_vcpu(struct kvm *kvm, struct its_collection *col)
Definition: vgic-its.c:381
static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its, u32 devid, u32 eventid, struct vgic_irq *irq)
Definition: vgic-its.c:606
static struct its_ite * find_ite(struct vgic_its *its, u32 device_id, u32 event_id)
Definition: vgic-its.c:226
#define its_is_collection_mapped(coll)
Definition: vgic-its.c:137
bool vgic_lpis_enabled(struct kvm_vcpu *vcpu)
Definition: vgic-mmio-v3.c:238
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_kick_vcpus()

void vgic_kick_vcpus ( struct kvm *  kvm)

Definition at line 1005 of file vgic.c.

1006 {
1007  struct kvm_vcpu *vcpu;
1008  unsigned long c;
1009 
1010  /*
1011  * We've injected an interrupt, time to find out who deserves
1012  * a good kick...
1013  */
1014  kvm_for_each_vcpu(c, vcpu, kvm) {
1015  if (kvm_vgic_vcpu_pending_irq(vcpu)) {
1016  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1017  kvm_vcpu_kick(vcpu);
1018  }
1019  }
1020 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
Definition: vgic.c:971
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_lazy_init()

int vgic_lazy_init ( struct kvm *  kvm)

vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest is a GICv2. A GICv3 must be explicitly initialized by userspace using the KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group. @kvm: kvm struct pointer

Definition at line 423 of file vgic-init.c.

424 {
425  int ret = 0;
426 
427  if (unlikely(!vgic_initialized(kvm))) {
428  /*
429  * We only provide the automatic initialization of the VGIC
430  * for the legacy case of a GICv2. Any other type must
431  * be explicitly initialized once setup with the respective
432  * KVM device call.
433  */
434  if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
435  return -EBUSY;
436 
437  mutex_lock(&kvm->arch.config_lock);
438  ret = vgic_init(kvm);
439  mutex_unlock(&kvm->arch.config_lock);
440  }
441 
442  return ret;
443 }
int vgic_init(struct kvm *kvm)
Definition: vgic-init.c:262
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_lpi_translation_cache_destroy()

void vgic_lpi_translation_cache_destroy ( struct kvm *  kvm)

Definition at line 1927 of file vgic-its.c.

1928 {
1929  struct vgic_dist *dist = &kvm->arch.vgic;
1930  struct vgic_translation_cache_entry *cte, *tmp;
1931 
 1933  vgic_its_invalidate_cache(kvm);
1934  list_for_each_entry_safe(cte, tmp,
1935  &dist->lpi_translation_cache, entry) {
1936  list_del(&cte->entry);
1937  kfree(cte);
1938  }
1939 }
void vgic_its_invalidate_cache(struct kvm *kvm)
Definition: vgic-its.c:659
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_lpi_translation_cache_init()

void vgic_lpi_translation_cache_init ( struct kvm *  kvm)

Definition at line 1903 of file vgic-its.c.

1904 {
1905  struct vgic_dist *dist = &kvm->arch.vgic;
1906  unsigned int sz;
1907  int i;
1908 
1909  if (!list_empty(&dist->lpi_translation_cache))
1910  return;
1911 
1912  sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
1913 
1914  for (i = 0; i < sz; i++) {
1915  struct vgic_translation_cache_entry *cte;
1916 
1917  /* An allocation failure is not fatal */
1918  cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
1919  if (WARN_ON(!cte))
1920  break;
1921 
1922  INIT_LIST_HEAD(&cte->entry);
1923  list_add(&cte->entry, &dist->lpi_translation_cache);
1924  }
1925 }
#define LPI_DEFAULT_PCPU_CACHE_SIZE
Definition: vgic-its.c:1901
Here is the caller graph for this function:

◆ vgic_lpis_enabled()

bool vgic_lpis_enabled ( struct kvm_vcpu *  vcpu)

Definition at line 238 of file vgic-mmio-v3.c.

239 {
240  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
241 
242  return atomic_read(&vgic_cpu->ctlr) == GICR_CTLR_ENABLE_LPIS;
243 }
atomic_t ctlr
Definition: arm_vgic.h:356
Here is the caller graph for this function:

◆ vgic_msi_to_its()

struct vgic_its* vgic_msi_to_its ( struct kvm *  kvm,
struct kvm_msi *  msi 
)

Definition at line 708 of file vgic-its.c.

709 {
710  u64 address;
711  struct kvm_io_device *kvm_io_dev;
712  struct vgic_io_device *iodev;
713 
714  if (!vgic_has_its(kvm))
715  return ERR_PTR(-ENODEV);
716 
717  if (!(msi->flags & KVM_MSI_VALID_DEVID))
718  return ERR_PTR(-EINVAL);
719 
720  address = (u64)msi->address_hi << 32 | msi->address_lo;
721 
722  kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
723  if (!kvm_io_dev)
724  return ERR_PTR(-EINVAL);
725 
726  if (kvm_io_dev->ops != &kvm_io_gic_ops)
727  return ERR_PTR(-EINVAL);
728 
729  iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
730  if (iodev->iodev_type != IODEV_ITS)
731  return ERR_PTR(-EINVAL);
732 
733  return iodev->its;
734 }
@ IODEV_ITS
Definition: arm_vgic.h:172
struct kvm_io_device * kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr)
Definition: kvm_main.c:5989
const struct kvm_io_device_ops * ops
Definition: iodev.h:33
struct vgic_its * its
Definition: arm_vgic.h:179
enum iodev_type iodev_type
Definition: arm_vgic.h:182
struct kvm_io_device dev
Definition: arm_vgic.h:184
const struct kvm_io_device_ops kvm_io_gic_ops
Definition: vgic-mmio.c:1075
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_put_irq()

void vgic_put_irq ( struct kvm *  kvm,
struct vgic_irq irq 
)

Definition at line 139 of file vgic.c.

140 {
141  struct vgic_dist *dist = &kvm->arch.vgic;
142  unsigned long flags;
143 
144  if (irq->intid < VGIC_MIN_LPI)
145  return;
146 
147  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
148  __vgic_put_lpi_locked(kvm, irq);
149  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_queue_irq_unlock()

bool vgic_queue_irq_unlock ( struct kvm *  kvm,
struct vgic_irq irq,
unsigned long  flags 
)

Definition at line 336 of file vgic.c.

338 {
339  struct kvm_vcpu *vcpu;
340 
341  lockdep_assert_held(&irq->irq_lock);
342 
343 retry:
344  vcpu = vgic_target_oracle(irq);
345  if (irq->vcpu || !vcpu) {
346  /*
347  * If this IRQ is already on a VCPU's ap_list, then it
348  * cannot be moved or modified and there is no more work for
349  * us to do.
350  *
351  * Otherwise, if the irq is not pending and enabled, it does
352  * not need to be inserted into an ap_list and there is also
353  * no more work for us to do.
354  */
355  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
356 
357  /*
358  * We have to kick the VCPU here, because we could be
359  * queueing an edge-triggered interrupt for which we
360  * get no EOI maintenance interrupt. In that case,
361  * while the IRQ is already on the VCPU's AP list, the
362  * VCPU could have EOI'ed the original interrupt and
363  * won't see this one until it exits for some other
364  * reason.
365  */
366  if (vcpu) {
367  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
368  kvm_vcpu_kick(vcpu);
369  }
370  return false;
371  }
372 
373  /*
374  * We must unlock the irq lock to take the ap_list_lock where
375  * we are going to insert this new pending interrupt.
376  */
377  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
378 
379  /* someone can do stuff here, which we re-check below */
380 
381  raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
382  raw_spin_lock(&irq->irq_lock);
383 
384  /*
385  * Did something change behind our backs?
386  *
387  * There are two cases:
388  * 1) The irq lost its pending state or was disabled behind our
389  * backs and/or it was queued to another VCPU's ap_list.
390  * 2) Someone changed the affinity on this irq behind our
391  * backs and we are now holding the wrong ap_list_lock.
392  *
393  * In both cases, drop the locks and retry.
394  */
395 
396  if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
397  raw_spin_unlock(&irq->irq_lock);
398  raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
399  flags);
400 
401  raw_spin_lock_irqsave(&irq->irq_lock, flags);
402  goto retry;
403  }
404 
405  /*
406  * Grab a reference to the irq to reflect the fact that it is
407  * now in the ap_list.
408  */
409  vgic_get_irq_kref(irq);
410  list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
411  irq->vcpu = vcpu;
412 
413  raw_spin_unlock(&irq->irq_lock);
414  raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
415 
416  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
417  kvm_vcpu_kick(vcpu);
418 
419  return true;
420 }
static struct kvm_vcpu * vgic_target_oracle(struct vgic_irq *irq)
Definition: vgic.c:216
static void vgic_get_irq_kref(struct vgic_irq *irq)
Definition: vgic.h:223
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_register_dist_iodev()

int vgic_register_dist_iodev ( struct kvm *  kvm,
gpa_t  dist_base_address,
enum  vgic_type 
)

Definition at line 1080 of file vgic-mmio.c.

1082 {
1083  struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
1084  unsigned int len;
1085 
1086  switch (type) {
1087  case VGIC_V2:
1088  len = vgic_v2_init_dist_iodev(io_device);
1089  break;
1090  case VGIC_V3:
1091  len = vgic_v3_init_dist_iodev(io_device);
1092  break;
1093  default:
1094  BUG_ON(1);
1095  }
1096 
1097  io_device->base_addr = dist_base_address;
1098  io_device->iodev_type = IODEV_DIST;
1099  io_device->redist_vcpu = NULL;
1100 
1101  return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
1102  len, &io_device->dev);
1103 }
@ VGIC_V3
Definition: arm_vgic.h:40
@ IODEV_DIST
Definition: arm_vgic.h:170
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev)
Definition: kvm_main.c:5897
struct kvm_vcpu * redist_vcpu
Definition: arm_vgic.h:178
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
Definition: vgic-mmio-v2.c:487
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
Definition: vgic-mmio-v3.c:727
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_register_redist_iodev()

int vgic_register_redist_iodev ( struct kvm_vcpu *  vcpu)

vgic_register_redist_iodev - register a single redist iodev @vcpu: The VCPU to which the redistributor belongs

Register a KVM iodev for this VCPU's redistributor using the address provided.

Return 0 on success, -ERRNO otherwise.

Definition at line 746 of file vgic-mmio-v3.c.

747 {
748  struct kvm *kvm = vcpu->kvm;
749  struct vgic_dist *vgic = &kvm->arch.vgic;
750  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
751  struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
752  struct vgic_redist_region *rdreg;
753  gpa_t rd_base;
754  int ret = 0;
755 
756  lockdep_assert_held(&kvm->slots_lock);
757  mutex_lock(&kvm->arch.config_lock);
758 
 759  if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
 760  goto out_unlock;
761 
762  /*
763  * We may be creating VCPUs before having set the base address for the
764  * redistributor region, in which case we will come back to this
765  * function for all VCPUs when the base address is set. Just return
766  * without doing any work for now.
767  */
768  rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
769  if (!rdreg)
770  goto out_unlock;
771 
772  if (!vgic_v3_check_base(kvm)) {
773  ret = -EINVAL;
774  goto out_unlock;
775  }
776 
777  vgic_cpu->rdreg = rdreg;
778  vgic_cpu->rdreg_index = rdreg->free_index;
779 
780  rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
781 
 782  kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
 783  rd_dev->base_addr = rd_base;
784  rd_dev->iodev_type = IODEV_REDIST;
785  rd_dev->regions = vgic_v3_rd_registers;
786  rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
787  rd_dev->redist_vcpu = vcpu;
788 
789  mutex_unlock(&kvm->arch.config_lock);
790 
791  ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
792  2 * SZ_64K, &rd_dev->dev);
793  if (ret)
794  return ret;
795 
796  /* Protected by slots_lock */
797  rdreg->free_index++;
798  return 0;
799 
800 out_unlock:
801  mutex_unlock(&kvm->arch.config_lock);
802  return ret;
803 }
@ IODEV_REDIST
Definition: arm_vgic.h:171
static void kvm_iodevice_init(struct kvm_io_device *dev, const struct kvm_io_device_ops *ops)
Definition: iodev.h:36
struct vgic_io_device rd_iodev
Definition: arm_vgic.h:348
struct vgic_redist_region * rdreg
Definition: arm_vgic.h:349
u32 rdreg_index
Definition: arm_vgic.h:350
struct list_head rd_regions
Definition: arm_vgic.h:252
static const struct vgic_register_region vgic_v3_rd_registers[]
Definition: vgic-mmio-v3.c:649
struct vgic_redist_region * vgic_v3_rdist_free_slot(struct list_head *rd_regions)
Definition: vgic-v3.c:513
bool vgic_v3_check_base(struct kvm *kvm)
Definition: vgic-v3.c:477
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_set_vmcr()

void vgic_set_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr vmcr 
)

Definition at line 843 of file vgic-mmio.c.

844 {
 845  if (kvm_vgic_global_state.type == VGIC_V2)
 846  vgic_v2_set_vmcr(vcpu, vmcr);
847  else
848  vgic_v3_set_vmcr(vcpu, vmcr);
849 }
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
Definition: vgic-v2.c:205
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
Definition: vgic-v3.c:194
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_supports_direct_msis()

bool vgic_supports_direct_msis ( struct kvm *  kvm)

Definition at line 51 of file vgic-mmio-v3.c.

52 {
 53  return (kvm_vgic_global_state.has_gicv4_1 ||
 54  (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
 55 }
bool has_gicv4_1
Definition: arm_vgic.h:75
bool has_gicv4
Definition: arm_vgic.h:74
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_unregister_redist_iodev()

void vgic_unregister_redist_iodev ( struct kvm_vcpu *  vcpu)

Definition at line 805 of file vgic-mmio-v3.c.

806 {
807  struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
808 
809  kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
810 }
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, struct kvm_io_device *dev)
Definition: kvm_main.c:5941
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_clear_lr()

void vgic_v2_clear_lr ( struct kvm_vcpu *  vcpu,
int  lr 
)

Definition at line 200 of file vgic-v2.c.

201 {
202  vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
203 }
Here is the caller graph for this function:

◆ vgic_v2_cpuif_uaccess()

int vgic_v2_cpuif_uaccess ( struct kvm_vcpu *  vcpu,
bool  is_write,
int  offset,
u32 *  val 
)

Definition at line 539 of file vgic-mmio-v2.c.

541 {
542  struct vgic_io_device dev = {
543  .regions = vgic_v2_cpu_registers,
544  .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
545  .iodev_type = IODEV_CPUIF,
546  };
547 
548  return vgic_uaccess(vcpu, &dev, is_write, offset, val);
549 }
@ IODEV_CPUIF
Definition: arm_vgic.h:169
static const struct vgic_register_region vgic_v2_cpu_registers[]
Definition: vgic-mmio-v2.c:466
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev, bool is_write, int offset, u32 *val)
Definition: vgic-mmio.c:1005
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_dist_uaccess()

int vgic_v2_dist_uaccess ( struct kvm_vcpu *  vcpu,
bool  is_write,
int  offset,
u32 *  val 
)

Definition at line 551 of file vgic-mmio-v2.c.

553 {
554  struct vgic_io_device dev = {
555  .regions = vgic_v2_dist_registers,
556  .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
557  .iodev_type = IODEV_DIST,
558  };
559 
560  return vgic_uaccess(vcpu, &dev, is_write, offset, val);
561 }
static const struct vgic_register_region vgic_v2_dist_registers[]
Definition: vgic-mmio-v2.c:413
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_enable()

void vgic_v2_enable ( struct kvm_vcpu *  vcpu)

Definition at line 260 of file vgic-v2.c.

261 {
262  /*
263  * By forcing VMCR to zero, the GIC will restore the binary
264  * points to their reset values. Anything else resets to zero
265  * anyway.
266  */
267  vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
268 
269  /* Get the show on the road... */
270  vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
271 }
Here is the caller graph for this function:

◆ vgic_v2_fold_lr_state()

void vgic_v2_fold_lr_state ( struct kvm_vcpu *  vcpu)

Definition at line 49 of file vgic-v2.c.

50 {
51  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
52  struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
53  int lr;
54 
55  DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
56 
57  cpuif->vgic_hcr &= ~GICH_HCR_UIE;
58 
59  for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
60  u32 val = cpuif->vgic_lr[lr];
61  u32 cpuid, intid = val & GICH_LR_VIRTUALID;
62  struct vgic_irq *irq;
63  bool deactivated;
64 
65  /* Extract the source vCPU id from the LR */
66  cpuid = val & GICH_LR_PHYSID_CPUID;
67  cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
68  cpuid &= 7;
69 
70  /* Notify fds when the guest EOI'ed a level-triggered SPI */
71  if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
72  kvm_notify_acked_irq(vcpu->kvm, 0,
 73  intid - VGIC_NR_PRIVATE_IRQS);
 74 
75  irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
76 
77  raw_spin_lock(&irq->irq_lock);
78 
79  /* Always preserve the active bit, note deactivation */
80  deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
81  irq->active = !!(val & GICH_LR_ACTIVE_BIT);
82 
83  if (irq->active && vgic_irq_is_sgi(intid))
84  irq->active_source = cpuid;
85 
86  /* Edge is the only case where we preserve the pending bit */
87  if (irq->config == VGIC_CONFIG_EDGE &&
88  (val & GICH_LR_PENDING_BIT)) {
89  irq->pending_latch = true;
90 
 91  if (vgic_irq_is_sgi(intid))
 92  irq->source |= (1 << cpuid);
93  }
94 
95  /*
96  * Clear soft pending state when level irqs have been acked.
97  */
98  if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
99  irq->pending_latch = false;
100 
101  /* Handle resampling for mapped interrupts if required */
102  vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
103 
104  raw_spin_unlock(&irq->irq_lock);
105  vgic_put_irq(vcpu->kvm, irq);
106  }
107 
108  cpuif->used_lrs = 0;
109 }
#define vgic_valid_spi(k, i)
Definition: arm_vgic.h:395
struct vgic_v2_cpu_if vgic_v2
Definition: arm_vgic.h:328
u8 active_source
Definition: arm_vgic.h:149
unsigned int used_lrs
Definition: arm_vgic.h:303
u32 vgic_lr[VGIC_V2_MAX_LRS]
Definition: arm_vgic.h:301
static bool lr_signals_eoi_mi(u32 lr_val)
Definition: vgic-v2.c:36
void vgic_irq_handle_resampling(struct vgic_irq *irq, bool lr_deactivated, bool lr_pending)
Definition: vgic.c:1060
#define DEBUG_SPINLOCK_BUG_ON(p)
Definition: vgic.h:99
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_get_vmcr()

void vgic_v2_get_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr vmcr 
)

Definition at line 232 of file vgic-v2.c.

233 {
234  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
235  u32 vmcr;
236 
237  vmcr = cpu_if->vgic_vmcr;
238 
239  vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
240  GICH_VMCR_ENABLE_GRP0_SHIFT;
241  vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
242  GICH_VMCR_ENABLE_GRP1_SHIFT;
243  vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
244  GICH_VMCR_ACK_CTL_SHIFT;
245  vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
246  GICH_VMCR_FIQ_EN_SHIFT;
247  vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
248  GICH_VMCR_CBPR_SHIFT;
249  vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
250  GICH_VMCR_EOI_MODE_SHIFT;
251 
252  vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
253  GICH_VMCR_ALIAS_BINPOINT_SHIFT;
254  vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
255  GICH_VMCR_BINPOINT_SHIFT;
256  vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
257  GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
258 }
Here is the caller graph for this function:

◆ vgic_v2_has_attr_regs()

int vgic_v2_has_attr_regs ( struct kvm_device *  dev,
struct kvm_device_attr *  attr 
)

Definition at line 497 of file vgic-mmio-v2.c.

498 {
499  const struct vgic_register_region *region;
500  struct vgic_io_device iodev;
501  struct vgic_reg_attr reg_attr;
502  struct kvm_vcpu *vcpu;
503  gpa_t addr;
504  int ret;
505 
506  ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
507  if (ret)
508  return ret;
509 
510  vcpu = reg_attr.vcpu;
511  addr = reg_attr.addr;
512 
513  switch (attr->group) {
514  case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
515  iodev.regions = vgic_v2_dist_registers;
516  iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
517  iodev.base_addr = 0;
518  break;
519  case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
520  iodev.regions = vgic_v2_cpu_registers;
521  iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
522  iodev.base_addr = 0;
523  break;
524  default:
525  return -ENXIO;
526  }
527 
528  /* We only support aligned 32-bit accesses. */
529  if (addr & 3)
530  return -ENXIO;
531 
532  region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
533  if (!region)
534  return -ENXIO;
535 
536  return 0;
537 }
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr)
const struct vgic_register_region * vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, int len)
Definition: vgic-mmio.c:950
Here is the call graph for this function:

◆ vgic_v2_init_lrs()

void vgic_v2_init_lrs ( void  )

Definition at line 21 of file vgic-v2.c.

22 {
23  int i;
24 
25  for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
26  vgic_v2_write_lr(i, 0);
27 }
int nr_lr
Definition: arm_vgic.h:62
static void vgic_v2_write_lr(int lr, u32 val)
Definition: vgic-v2.c:14
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_load()

void vgic_v2_load ( struct kvm_vcpu *  vcpu)

Definition at line 457 of file vgic-v2.c.

458 {
459  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
460 
461  writel_relaxed(cpu_if->vgic_vmcr,
462  kvm_vgic_global_state.vctrl_base + GICH_VMCR);
463  writel_relaxed(cpu_if->vgic_apr,
464  kvm_vgic_global_state.vctrl_base + GICH_APR);
465 }
void __iomem * vctrl_base
Definition: arm_vgic.h:57
Here is the caller graph for this function:

◆ vgic_v2_map_resources()

int vgic_v2_map_resources ( struct kvm *  kvm)

Definition at line 289 of file vgic-v2.c.

290 {
291  struct vgic_dist *dist = &kvm->arch.vgic;
292  int ret = 0;
293 
294  if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
 295  IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
 296  kvm_debug("Need to set vgic cpu and dist addresses first\n");
297  return -ENXIO;
298  }
299 
300  if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
301  kvm_debug("VGIC CPU and dist frames overlap\n");
302  return -EINVAL;
303  }
304 
305  /*
306  * Initialize the vgic if this hasn't already been done on demand by
307  * accessing the vgic state from userspace.
308  */
309  ret = vgic_init(kvm);
310  if (ret) {
311  kvm_err("Unable to initialize VGIC dynamic data structures\n");
312  return ret;
313  }
314 
315  if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
316  ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 317  kvm_vgic_global_state.vcpu_base,
 318  KVM_VGIC_V2_CPU_SIZE, true);
319  if (ret) {
320  kvm_err("Unable to remap VGIC CPU to VCPU\n");
321  return ret;
322  }
323  }
324 
325  return 0;
326 }
struct static_key_false vgic_v2_cpuif_trap
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, phys_addr_t pa, unsigned long size, bool writable)
Definition: mmu.c:1066
gpa_t vgic_cpu_base
Definition: arm_vgic.h:250
phys_addr_t vcpu_base
Definition: arm_vgic.h:49
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
Definition: vgic-v2.c:274
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_parse_attr()

int vgic_v2_parse_attr ( struct kvm_device *  dev,
struct kvm_device_attr *  attr,
struct vgic_reg_attr reg_attr 
)

Definition at line 338 of file vgic-kvm-device.c.

340 {
341  int cpuid;
342 
343  cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
344 
345  reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
346  reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
347 
348  return 0;
349 }
struct kvm_vcpu * vcpu
Definition: vgic.h:170
gpa_t addr
Definition: vgic.h:171
Here is the caller graph for this function:

◆ vgic_v2_populate_lr()

void vgic_v2_populate_lr ( struct kvm_vcpu *  vcpu,
struct vgic_irq irq,
int  lr 
)

Definition at line 122 of file vgic-v2.c.

123 {
124  u32 val = irq->intid;
125  bool allow_pending = true;
126 
127  if (irq->active) {
128  val |= GICH_LR_ACTIVE_BIT;
129  if (vgic_irq_is_sgi(irq->intid))
130  val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
131  if (vgic_irq_is_multi_sgi(irq)) {
132  allow_pending = false;
133  val |= GICH_LR_EOI;
134  }
135  }
136 
137  if (irq->group)
138  val |= GICH_LR_GROUP1;
139 
140  if (irq->hw && !vgic_irq_needs_resampling(irq)) {
141  val |= GICH_LR_HW;
142  val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
143  /*
144  * Never set pending+active on a HW interrupt, as the
145  * pending state is kept at the physical distributor
146  * level.
147  */
148  if (irq->active)
149  allow_pending = false;
150  } else {
151  if (irq->config == VGIC_CONFIG_LEVEL) {
152  val |= GICH_LR_EOI;
153 
154  /*
155  * Software resampling doesn't work very well
156  * if we allow P+A, so let's not do that.
157  */
158  if (irq->active)
159  allow_pending = false;
160  }
161  }
162 
163  if (allow_pending && irq_is_pending(irq)) {
164  val |= GICH_LR_PENDING_BIT;
165 
166  if (irq->config == VGIC_CONFIG_EDGE)
167  irq->pending_latch = false;
168 
169  if (vgic_irq_is_sgi(irq->intid)) {
170  u32 src = ffs(irq->source);
171 
172  if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
173  irq->intid))
174  return;
175 
176  val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
177  irq->source &= ~(1 << (src - 1));
178  if (irq->source) {
179  irq->pending_latch = true;
180  val |= GICH_LR_EOI;
181  }
182  }
183  }
184 
185  /*
186  * Level-triggered mapped IRQs are special because we only observe
187  * rising edges as input to the VGIC. We therefore lower the line
188  * level here, so that we can take new virtual IRQs. See
189  * vgic_v2_fold_lr_state for more info.
190  */
191  if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
192  irq->line_level = false;
193 
194  /* The GICv2 LR only holds five bits of priority. */
195  val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
196 
197  vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
198 }
u8 priority
Definition: arm_vgic.h:150
u32 hwintid
Definition: arm_vgic.h:142
static bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
Definition: vgic.h:130
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_probe()

int vgic_v2_probe ( const struct gic_kvm_info info)

vgic_v2_probe - probe for a VGICv2-compatible interrupt controller. @info: pointer to the GIC description

Returns 0 if the VGICv2 has been probed successfully, returns an error code otherwise

Definition at line 337 of file vgic-v2.c.

338 {
339  int ret;
340  u32 vtr;
341 
342  if (is_protected_kvm_enabled()) {
343  kvm_err("GICv2 not supported in protected mode\n");
344  return -ENXIO;
345  }
346 
347  if (!info->vctrl.start) {
348  kvm_err("GICH not present in the firmware table\n");
349  return -ENXIO;
350  }
351 
352  if (!PAGE_ALIGNED(info->vcpu.start) ||
353  !PAGE_ALIGNED(resource_size(&info->vcpu))) {
354  kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
355 
356  ret = create_hyp_io_mappings(info->vcpu.start,
357  resource_size(&info->vcpu),
360  if (ret) {
361  kvm_err("Cannot map GICV into hyp\n");
362  goto out;
363  }
364 
365  static_branch_enable(&vgic_v2_cpuif_trap);
366  }
367 
368  ret = create_hyp_io_mappings(info->vctrl.start,
369  resource_size(&info->vctrl),
372  if (ret) {
373  kvm_err("Cannot map VCTRL into hyp\n");
374  goto out;
375  }
376 
377  vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
378  kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
379 
380  ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
381  if (ret) {
382  kvm_err("Cannot register GICv2 KVM device\n");
383  goto out;
384  }
385 
387  kvm_vgic_global_state.vcpu_base = info->vcpu.start;
390 
391  kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
392 
393  return 0;
394 out:
399 
400  return ret;
401 }
#define VGIC_V2_MAX_CPUS
Definition: arm_vgic.h:23
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, void __iomem **kaddr, void __iomem **haddr)
Definition: mmu.c:740
bool can_emulate_gicv2
Definition: arm_vgic.h:71
void __iomem * vcpu_base_va
Definition: arm_vgic.h:52
void __iomem * vcpu_hyp_va
Definition: arm_vgic.h:54
void __iomem * vctrl_hyp
Definition: arm_vgic.h:59
int max_gic_vcpus
Definition: arm_vgic.h:68
int kvm_register_vgic_device(unsigned long type)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_put()

void vgic_v2_put ( struct kvm_vcpu *  vcpu)

Definition at line 474 of file vgic-v2.c.

475 {
476  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
477 
478  vgic_v2_vmcr_sync(vcpu);
479  cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
480 }
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:467
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_restore_state()

void vgic_v2_restore_state ( struct kvm_vcpu *  vcpu)

Definition at line 438 of file vgic-v2.c.

439 {
440  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
441  void __iomem *base = kvm_vgic_global_state.vctrl_base;
442  u64 used_lrs = cpu_if->used_lrs;
443  int i;
444 
445  if (!base)
446  return;
447 
448  if (used_lrs) {
449  writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
450  for (i = 0; i < used_lrs; i++) {
451  writel_relaxed(cpu_if->vgic_lr[i],
452  base + GICH_LR0 + (i * 4));
453  }
454  }
455 }
Here is the caller graph for this function:

◆ vgic_v2_save_state()

void vgic_v2_save_state ( struct kvm_vcpu *  vcpu)

Definition at line 424 of file vgic-v2.c.

425 {
426  void __iomem *base = kvm_vgic_global_state.vctrl_base;
427  u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
428 
429  if (!base)
430  return;
431 
432  if (used_lrs) {
433  save_lrs(vcpu, base);
434  writel_relaxed(0, base + GICH_HCR);
435  }
436 }
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
Definition: vgic-v2.c:403
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v2_set_underflow()

void vgic_v2_set_underflow ( struct kvm_vcpu *  vcpu)

Definition at line 29 of file vgic-v2.c.

30 {
31  struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
32 
33  cpuif->vgic_hcr |= GICH_HCR_UIE;
34 }
Here is the caller graph for this function:

◆ vgic_v2_set_vmcr()

void vgic_v2_set_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr vmcr 
)

Definition at line 205 of file vgic-v2.c.

206 {
207  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
208  u32 vmcr;
209 
210  vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
211  GICH_VMCR_ENABLE_GRP0_MASK;
212  vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
213  GICH_VMCR_ENABLE_GRP1_MASK;
214  vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
215  GICH_VMCR_ACK_CTL_MASK;
216  vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
217  GICH_VMCR_FIQ_EN_MASK;
218  vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
219  GICH_VMCR_CBPR_MASK;
220  vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
221  GICH_VMCR_EOI_MODE_MASK;
222  vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
223  GICH_VMCR_ALIAS_BINPOINT_MASK;
224  vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
225  GICH_VMCR_BINPOINT_MASK;
226  vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
227  GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
228 
229  cpu_if->vgic_vmcr = vmcr;
230 }
Here is the caller graph for this function:

◆ vgic_v2_vmcr_sync()

void vgic_v2_vmcr_sync ( struct kvm_vcpu *  vcpu)

Definition at line 467 of file vgic-v2.c.

468 {
469  struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
470 
471  cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
472 }
Here is the caller graph for this function:

◆ vgic_v3_check_base()

bool vgic_v3_check_base ( struct kvm *  kvm)

Definition at line 477 of file vgic-v3.c.

478 {
479  struct vgic_dist *d = &kvm->arch.vgic;
480  struct vgic_redist_region *rdreg;
481 
483  d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
484  return false;
485 
486  list_for_each_entry(rdreg, &d->rd_regions, list) {
487  size_t sz = vgic_v3_rd_region_size(kvm, rdreg);
488 
490  rdreg->base, SZ_64K, sz))
491  return false;
492  }
493 
495  return true;
496 
497  return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
498  KVM_VGIC_V3_DIST_SIZE);
499 }
struct list_head list
Definition: arm_vgic.h:221
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, phys_addr_t addr, phys_addr_t alignment, phys_addr_t size)
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
Definition: vgic-v3.c:460
static size_t vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
Definition: vgic.h:303
#define VGIC_ADDR_UNDEF
Definition: vgic.h:14
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_clear_lr()

void vgic_v3_clear_lr ( struct kvm_vcpu *  vcpu,
int  lr 
)

Definition at line 189 of file vgic-v3.c.

190 {
191  vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
192 }
Here is the caller graph for this function:

◆ vgic_v3_cpu_sysregs_uaccess()

int vgic_v3_cpu_sysregs_uaccess ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr,
bool  is_write 
)

Definition at line 351 of file vgic-sys-reg-v3.c.

354 {
355  struct kvm_one_reg reg = {
356  .id = attr_to_id(attr->attr),
357  .addr = attr->addr,
358  };
359 
360  if (is_write)
361  return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
362  ARRAY_SIZE(gic_v3_icc_reg_descs));
363  else
364  return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
365  ARRAY_SIZE(gic_v3_icc_reg_descs));
366 }
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, const struct sys_reg_desc table[], unsigned int num)
Definition: sys_regs.c:3680
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, const struct sys_reg_desc table[], unsigned int num)
Definition: sys_regs.c:3721
static u64 attr_to_id(u64 attr)
static const struct sys_reg_desc gic_v3_icc_reg_descs[]
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_dist_uaccess()

int vgic_v3_dist_uaccess ( struct kvm_vcpu *  vcpu,
bool  is_write,
int  offset,
u32 *  val 
)

Definition at line 1095 of file vgic-mmio-v3.c.

1097 {
1098  struct vgic_io_device dev = {
1099  .regions = vgic_v3_dist_registers,
1100  .nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
1101  };
1102 
1103  return vgic_uaccess(vcpu, &dev, is_write, offset, val);
1104 }
static const struct vgic_register_region vgic_v3_dist_registers[]
Definition: vgic-mmio-v3.c:594
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_enable()

void vgic_v3_enable ( struct kvm_vcpu *  vcpu)

Definition at line 260 of file vgic-v3.c.

261 {
262  struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
263 
264  /*
265  * By forcing VMCR to zero, the GIC will restore the binary
266  * points to their reset values. Anything else resets to zero
267  * anyway.
268  */
269  vgic_v3->vgic_vmcr = 0;
270 
271  /*
272  * If we are emulating a GICv3, we do it in a non-GICv2-compatible
273  * way, so we force SRE to 1 to demonstrate this to the guest.
274  * Also, we don't support any form of IRQ/FIQ bypass.
275  * This goes with the spec allowing the value to be RAO/WI.
276  */
277  if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
278  vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
279  ICC_SRE_EL1_DFB |
280  ICC_SRE_EL1_SRE);
281  vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
282  } else {
283  vgic_v3->vgic_sre = 0;
284  }
285 
286  vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
287  ICH_VTR_ID_BITS_MASK) >>
288  ICH_VTR_ID_BITS_SHIFT;
289  vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
290  ICH_VTR_PRI_BITS_MASK) >>
291  ICH_VTR_PRI_BITS_SHIFT) + 1;
292 
293  /* Get the show on the road... */
294  vgic_v3->vgic_hcr = ICH_HCR_EN;
295  if (group0_trap)
296  vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
297  if (group1_trap)
298  vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
299  if (common_trap)
300  vgic_v3->vgic_hcr |= ICH_HCR_TC;
301  if (dir_trap)
302  vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
303 }
u32 ich_vtr_el2
Definition: arm_vgic.h:83
static bool group1_trap
Definition: vgic-v3.c:17
static bool common_trap
Definition: vgic-v3.c:18
#define INITIAL_PENDBASER_VALUE
Definition: vgic-v3.c:255
static bool dir_trap
Definition: vgic-v3.c:19
static bool group0_trap
Definition: vgic-v3.c:16
Here is the caller graph for this function:

◆ vgic_v3_fold_lr_state()

void vgic_v3_fold_lr_state ( struct kvm_vcpu *  vcpu)

Definition at line 35 of file vgic-v3.c.

36 {
37  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
38  struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
39  u32 model = vcpu->kvm->arch.vgic.vgic_model;
40  int lr;
41 
42  DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
43 
44  cpuif->vgic_hcr &= ~ICH_HCR_UIE;
45 
46  for (lr = 0; lr < cpuif->used_lrs; lr++) {
47  u64 val = cpuif->vgic_lr[lr];
48  u32 intid, cpuid;
49  struct vgic_irq *irq;
50  bool is_v2_sgi = false;
51  bool deactivated;
52 
53  cpuid = val & GICH_LR_PHYSID_CPUID;
54  cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
55 
56  if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
57  intid = val & ICH_LR_VIRTUAL_ID_MASK;
58  } else {
59  intid = val & GICH_LR_VIRTUALID;
60  is_v2_sgi = vgic_irq_is_sgi(intid);
61  }
62 
63  /* Notify fds when the guest EOI'ed a level-triggered IRQ */
64  if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
65  kvm_notify_acked_irq(vcpu->kvm, 0,
67 
68  irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
69  if (!irq) /* An LPI could have been unmapped. */
70  continue;
71 
72  raw_spin_lock(&irq->irq_lock);
73 
74  /* Always preserve the active bit, note deactivation */
75  deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
76  irq->active = !!(val & ICH_LR_ACTIVE_BIT);
77 
78  if (irq->active && is_v2_sgi)
79  irq->active_source = cpuid;
80 
81  /* Edge is the only case where we preserve the pending bit */
82  if (irq->config == VGIC_CONFIG_EDGE &&
83  (val & ICH_LR_PENDING_BIT)) {
84  irq->pending_latch = true;
85 
86  if (is_v2_sgi)
87  irq->source |= (1 << cpuid);
88  }
89 
90  /*
91  * Clear soft pending state when level irqs have been acked.
92  */
93  if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
94  irq->pending_latch = false;
95 
96  /* Handle resampling for mapped interrupts if required */
97  vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
98 
99  raw_spin_unlock(&irq->irq_lock);
100  vgic_put_irq(vcpu->kvm, irq);
101  }
102 
103  cpuif->used_lrs = 0;
104 }
struct vgic_v3_cpu_if vgic_v3
Definition: arm_vgic.h:329
unsigned int used_lrs
Definition: arm_vgic.h:322
u64 vgic_lr[VGIC_V3_MAX_LRS]
Definition: arm_vgic.h:312
static bool lr_signals_eoi_mi(u64 lr_val)
Definition: vgic-v3.c:29
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_free_redist_region()

void vgic_v3_free_redist_region ( struct vgic_redist_region rdreg)

Definition at line 922 of file vgic-mmio-v3.c.

923 {
924  list_del(&rdreg->list);
925  kfree(rdreg);
926 }
Here is the caller graph for this function:

◆ vgic_v3_get_vmcr()

void vgic_v3_get_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr vmcr 
)

Definition at line 224 of file vgic-v3.c.

225 {
226  struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
227  u32 model = vcpu->kvm->arch.vgic.vgic_model;
228  u32 vmcr;
229 
230  vmcr = cpu_if->vgic_vmcr;
231 
232  if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
233  vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
234  ICH_VMCR_ACK_CTL_SHIFT;
235  vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
236  ICH_VMCR_FIQ_EN_SHIFT;
237  } else {
238  /*
239  * When emulating GICv3 on GICv3 with SRE=1, the
240  * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
241  */
242  vmcrp->fiqen = 1;
243  vmcrp->ackctl = 0;
244  }
245 
246  vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
247  vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
248  vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
249  vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
250  vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
251  vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
252  vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
253 }
Here is the caller graph for this function:

◆ vgic_v3_has_attr_regs()

int vgic_v3_has_attr_regs ( struct kvm_device *  dev,
struct kvm_device_attr *  attr 
)

Definition at line 956 of file vgic-mmio-v3.c.

957 {
958  const struct vgic_register_region *region;
959  struct vgic_io_device iodev;
960  struct vgic_reg_attr reg_attr;
961  struct kvm_vcpu *vcpu;
962  gpa_t addr;
963  int ret;
964 
965  ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
966  if (ret)
967  return ret;
968 
969  vcpu = reg_attr.vcpu;
970  addr = reg_attr.addr;
971 
972  switch (attr->group) {
973  case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
974  iodev.regions = vgic_v3_dist_registers;
975  iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
976  iodev.base_addr = 0;
977  break;
978  case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:{
979  iodev.regions = vgic_v3_rd_registers;
980  iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
981  iodev.base_addr = 0;
982  break;
983  }
984  case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
985  return vgic_v3_has_cpu_sysregs_attr(vcpu, attr);
986  default:
987  return -ENXIO;
988  }
989 
990  /* We only support aligned 32-bit accesses. */
991  if (addr & 3)
992  return -ENXIO;
993 
994  region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
995  if (!region)
996  return -ENXIO;
997 
998  return 0;
999 }
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr)
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
Here is the call graph for this function:

◆ vgic_v3_has_cpu_sysregs_attr()

int vgic_v3_has_cpu_sysregs_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 342 of file vgic-sys-reg-v3.c.

343 {
345  ARRAY_SIZE(gic_v3_icc_reg_descs)))
346  return 0;
347 
348  return -ENXIO;
349 }
const struct sys_reg_desc * get_reg_by_id(u64 id, const struct sys_reg_desc table[], unsigned int num)
Definition: sys_regs.c:3528
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_line_level_info_uaccess()

int vgic_v3_line_level_info_uaccess ( struct kvm_vcpu *  vcpu,
bool  is_write,
u32  intid,
u32 *  val 
)

Definition at line 1117 of file vgic-mmio-v3.c.

1119 {
1120  if (intid % 32)
1121  return -EINVAL;
1122 
1123  if (is_write)
1124  vgic_write_irq_line_level_info(vcpu, intid, *val);
1125  else
1126  *val = vgic_read_irq_line_level_info(vcpu, intid);
1127 
1128  return 0;
1129 }
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, const u32 val)
Definition: vgic-mmio.c:788
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
Definition: vgic-mmio.c:766
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_load()

void vgic_v3_load ( struct kvm_vcpu *  vcpu)

Definition at line 720 of file vgic-v3.c.

721 {
722  struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
723 
724  /*
725  * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
726  * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
727  * VMCR_EL2 save/restore in the world switch.
728  */
729  if (likely(cpu_if->vgic_sre))
730  kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
731 
732  kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);
733 
734  if (has_vhe())
735  __vgic_v3_activate_traps(cpu_if);
736 
737  WARN_ON(vgic_v4_load(vcpu));
738 }
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:366
void __vgic_v3_write_vmcr(u32 vmcr)
Definition: vgic-v3-sr.c:463
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:260
int vgic_v4_load(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:349
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_lpi_sync_pending_status()

int vgic_v3_lpi_sync_pending_status ( struct kvm *  kvm,
struct vgic_irq irq 
)

Definition at line 305 of file vgic-v3.c.

306 {
307  struct kvm_vcpu *vcpu;
308  int byte_offset, bit_nr;
309  gpa_t pendbase, ptr;
310  bool status;
311  u8 val;
312  int ret;
313  unsigned long flags;
314 
315 retry:
316  vcpu = irq->target_vcpu;
317  if (!vcpu)
318  return 0;
319 
320  pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
321 
322  byte_offset = irq->intid / BITS_PER_BYTE;
323  bit_nr = irq->intid % BITS_PER_BYTE;
324  ptr = pendbase + byte_offset;
325 
326  ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
327  if (ret)
328  return ret;
329 
330  status = val & (1 << bit_nr);
331 
332  raw_spin_lock_irqsave(&irq->irq_lock, flags);
333  if (irq->target_vcpu != vcpu) {
334  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
335  goto retry;
336  }
337  irq->pending_latch = status;
338  vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
339 
340  if (status) {
341  /* clear consumed data */
342  val &= ~(1 << bit_nr);
343  ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
344  if (ret)
345  return ret;
346  }
347  return 0;
348 }
static int vgic_write_guest_lock(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len)
Definition: vgic.h:135
Here is the call graph for this function:

◆ vgic_v3_map_resources()

int vgic_v3_map_resources ( struct kvm *  kvm)

Definition at line 538 of file vgic-v3.c.

539 {
540  struct vgic_dist *dist = &kvm->arch.vgic;
541  struct kvm_vcpu *vcpu;
542  unsigned long c;
543 
544  kvm_for_each_vcpu(c, vcpu, kvm) {
545  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
546 
548  kvm_debug("vcpu %ld redistributor base not set\n", c);
549  return -ENXIO;
550  }
551  }
552 
553  if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
554  kvm_debug("Need to set vgic distributor addresses first\n");
555  return -ENXIO;
556  }
557 
558  if (!vgic_v3_check_base(kvm)) {
559  kvm_debug("VGIC redist and dist frames overlap\n");
560  return -EINVAL;
561  }
562 
563  /*
564  * For a VGICv3 we require the userland to explicitly initialize
565  * the VGIC before we need to use it.
566  */
567  if (!vgic_initialized(kvm)) {
568  return -EBUSY;
569  }
570 
573 
574  return 0;
575 }
void vgic_v4_configure_vsgis(struct kvm *kvm)
Definition: vgic-v4.c:187
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_max_apr_idx()

static int vgic_v3_max_apr_idx ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 275 of file vgic.h.

276 {
277  struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
278 
279  /*
280  * num_pri_bits are initialized with HW supported values.
281  * We can rely safely on num_pri_bits even if VM has not
282  * restored ICC_CTLR_EL1 before restoring APnR registers.
283  */
284  switch (cpu_if->num_pri_bits) {
285  case 7: return 3;
286  case 6: return 1;
287  default: return 0;
288  }
289 }
u32 num_pri_bits
Definition: arm_vgic.h:359
Here is the caller graph for this function:

◆ vgic_v3_parse_attr()

int vgic_v3_parse_attr ( struct kvm_device *  dev,
struct kvm_device_attr *  attr,
struct vgic_reg_attr reg_attr 
)

Definition at line 474 of file vgic-kvm-device.c.

476 {
477  unsigned long vgic_mpidr, mpidr_reg;
478 
479  /*
480  * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
481  * attr might not hold MPIDR. Hence assume vcpu0.
482  */
483  if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
484  vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
485  KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
486 
487  mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
488  reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
489  } else {
490  reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
491  }
492 
493  if (!reg_attr->vcpu)
494  return -EINVAL;
495 
496  reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
497 
498  return 0;
499 }
struct kvm_vcpu * kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
Definition: arm.c:2460
#define VGIC_TO_MPIDR(val)
Definition: vgic.h:40
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_populate_lr()

void vgic_v3_populate_lr ( struct kvm_vcpu *  vcpu,
struct vgic_irq irq,
int  lr 
)

Definition at line 107 of file vgic-v3.c.

108 {
109  u32 model = vcpu->kvm->arch.vgic.vgic_model;
110  u64 val = irq->intid;
111  bool allow_pending = true, is_v2_sgi;
112 
113  is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
114  model == KVM_DEV_TYPE_ARM_VGIC_V2);
115 
116  if (irq->active) {
117  val |= ICH_LR_ACTIVE_BIT;
118  if (is_v2_sgi)
119  val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
120  if (vgic_irq_is_multi_sgi(irq)) {
121  allow_pending = false;
122  val |= ICH_LR_EOI;
123  }
124  }
125 
126  if (irq->hw && !vgic_irq_needs_resampling(irq)) {
127  val |= ICH_LR_HW;
128  val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
129  /*
130  * Never set pending+active on a HW interrupt, as the
131  * pending state is kept at the physical distributor
132  * level.
133  */
134  if (irq->active)
135  allow_pending = false;
136  } else {
137  if (irq->config == VGIC_CONFIG_LEVEL) {
138  val |= ICH_LR_EOI;
139 
140  /*
141  * Software resampling doesn't work very well
142  * if we allow P+A, so let's not do that.
143  */
144  if (irq->active)
145  allow_pending = false;
146  }
147  }
148 
149  if (allow_pending && irq_is_pending(irq)) {
150  val |= ICH_LR_PENDING_BIT;
151 
152  if (irq->config == VGIC_CONFIG_EDGE)
153  irq->pending_latch = false;
154 
155  if (vgic_irq_is_sgi(irq->intid) &&
156  model == KVM_DEV_TYPE_ARM_VGIC_V2) {
157  u32 src = ffs(irq->source);
158 
159  if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
160  irq->intid))
161  return;
162 
163  val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
164  irq->source &= ~(1 << (src - 1));
165  if (irq->source) {
166  irq->pending_latch = true;
167  val |= ICH_LR_EOI;
168  }
169  }
170  }
171 
172  /*
173  * Level-triggered mapped IRQs are special because we only observe
174  * rising edges as input to the VGIC. We therefore lower the line
175  * level here, so that we can take new virtual IRQs. See
176  * vgic_v3_fold_lr_state for more info.
177  */
178  if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
179  irq->line_level = false;
180 
181  if (irq->group)
182  val |= ICH_LR_GROUP;
183 
184  val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
185 
186  vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
187 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_probe()

int vgic_v3_probe ( const struct gic_kvm_info info)

vgic_v3_probe - probe for a VGICv3-compatible interrupt controller. @info: pointer to the GIC description

Returns 0 if the VGICv3 has been probed successfully, returns an error code otherwise

Definition at line 632 of file vgic-v3.c.

633 {
634  u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
635  bool has_v2;
636  int ret;
637 
638  has_v2 = ich_vtr_el2 >> 63;
639  ich_vtr_el2 = (u32)ich_vtr_el2;
640 
641  /*
642  * The ListRegs field is 5 bits, but there is an architectural
643  * maximum of 16 list registers. Just ignore bit 4...
644  */
645  kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
647  kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
648 
649  /* GICv4 support? */
650  if (info->has_v4) {
652  kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
653  kvm_info("GICv4%s support %sabled\n",
655  gicv4_enable ? "en" : "dis");
656  }
657 
659 
660  if (!info->vcpu.start) {
661  kvm_info("GICv3: no GICV resource entry\n");
662  } else if (!has_v2) {
663  pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
664  } else if (!PAGE_ALIGNED(info->vcpu.start)) {
665  pr_warn("GICV physical address 0x%llx not page aligned\n",
666  (unsigned long long)info->vcpu.start);
667  } else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
668  kvm_vgic_global_state.vcpu_base = info->vcpu.start;
670  ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
671  if (ret) {
672  kvm_err("Cannot register GICv2 KVM device.\n");
673  return ret;
674  }
675  kvm_info("vgic-v2@%llx\n", info->vcpu.start);
676  }
677  ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
678  if (ret) {
679  kvm_err("Cannot register GICv3 KVM device.\n");
680  kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
681  return ret;
682  }
683 
685  kvm_info("disabling GICv2 emulation\n");
686 
687  if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
688  group0_trap = true;
689  group1_trap = true;
690  }
691 
692  if (vgic_v3_broken_seis()) {
693  kvm_info("GICv3 with broken locally generated SEI\n");
694 
695  kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
696  group0_trap = true;
697  group1_trap = true;
698  if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
699  dir_trap = true;
700  else
701  common_trap = true;
702  }
703 
705  kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
706  group0_trap ? "G0" : "",
707  group1_trap ? "G1" : "",
708  common_trap ? "C" : "",
709  dir_trap ? "D" : "");
710  static_branch_enable(&vgic_v3_cpuif_trap);
711  }
712 
716 
717  return 0;
718 }
enum kvm_mode kvm_get_mode(void)
Definition: arm.c:2657
#define VGIC_V3_MAX_CPUS
Definition: arm_vgic.h:22
struct static_key_false vgic_v3_cpuif_trap
void kvm_unregister_device_ops(u32 type)
Definition: kvm_main.c:4754
u64 __vgic_v3_get_gic_config(void)
Definition: vgic-v3-sr.c:414
static bool gicv4_enable
Definition: vgic-v3.c:20
static bool vgic_v3_broken_seis(void)
Definition: vgic-v3.c:619
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_put()

void vgic_v3_put ( struct kvm_vcpu *  vcpu)

Definition at line 748 of file vgic-v3.c.

749 {
750  struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
751 
752  WARN_ON(vgic_v4_put(vcpu));
753 
754  vgic_v3_vmcr_sync(vcpu);
755 
756  kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);
757 
758  if (has_vhe())
760 }
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:333
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:307
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:740
int vgic_v4_put(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:339
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_rd_region_size()

static size_t vgic_v3_rd_region_size ( struct kvm *  kvm,
struct vgic_redist_region rdreg 
)
inlinestatic

Definition at line 303 of file vgic.h.

304 {
305  if (!rdreg->count)
306  return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
307  else
308  return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
309 }
Here is the caller graph for this function:

◆ vgic_v3_rdist_free_slot()

struct vgic_redist_region* vgic_v3_rdist_free_slot ( struct list_head *  rd_regions)

vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one which has free space to put a new rdist region.

@rd_regions: redistributor region list head

A redistributor region maps n redistributors, where n = region size / (2 x 64kB). The stride between redistributors is 0 and regions are filled in index order.

Return: the redist region handle, if any, that has space to map a new rdist region.

Definition at line 513 of file vgic-v3.c.

514 {
515  struct vgic_redist_region *rdreg;
516 
517  list_for_each_entry(rdreg, rd_regions, list) {
518  if (!vgic_v3_redist_region_full(rdreg))
519  return rdreg;
520  }
521  return NULL;
522 }
static bool vgic_v3_redist_region_full(struct vgic_redist_region *region)
Definition: vgic.h:292
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_rdist_overlap()

bool vgic_v3_rdist_overlap ( struct kvm *  kvm,
gpa_t  base,
size_t  size 
)

vgic_v3_rdist_overlap - check if a region overlaps with any existing redistributor region

@kvm: kvm handle
@base: base of the region
@size: size of the region

Return: true if there is an overlap

Definition at line 460 of file vgic-v3.c.

461 {
462  struct vgic_dist *d = &kvm->arch.vgic;
463  struct vgic_redist_region *rdreg;
464 
465  list_for_each_entry(rdreg, &d->rd_regions, list) {
466  if ((base + size > rdreg->base) &&
467  (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
468  return true;
469  }
470  return false;
471 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_rdist_region_from_index()

struct vgic_redist_region* vgic_v3_rdist_region_from_index ( struct kvm *  kvm,
u32  index 
)

Definition at line 524 of file vgic-v3.c.

526 {
527  struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
528  struct vgic_redist_region *rdreg;
529 
530  list_for_each_entry(rdreg, rd_regions, list) {
531  if (rdreg->index == index)
532  return rdreg;
533  }
534  return NULL;
535 }
Here is the caller graph for this function:

◆ vgic_v3_redist_region_full()

static bool vgic_v3_redist_region_full ( struct vgic_redist_region *  region)
inlinestatic

Definition at line 292 of file vgic.h.

293 {
294  if (!region->count)
295  return false;
296 
297  return (region->free_index >= region->count);
298 }
Here is the caller graph for this function:

◆ vgic_v3_redist_uaccess()

int vgic_v3_redist_uaccess ( struct kvm_vcpu *  vcpu,
bool  is_write,
int  offset,
u32 *  val 
)

Definition at line 1106 of file vgic-mmio-v3.c.

1108 {
1109  struct vgic_io_device rd_dev = {
1111  .nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
1112  };
1113 
1114  return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
1115 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_save_pending_tables()

int vgic_v3_save_pending_tables ( struct kvm *  kvm)

vgic_v3_save_pending_tables - Save the pending tables into guest RAM. The kvm lock and all vcpu locks must be held.

Definition at line 377 of file vgic-v3.c.

378 {
379  struct vgic_dist *dist = &kvm->arch.vgic;
380  struct vgic_irq *irq;
381  gpa_t last_ptr = ~(gpa_t)0;
382  bool vlpi_avail = false;
383  int ret = 0;
384  u8 val;
385 
386  if (unlikely(!vgic_initialized(kvm)))
387  return -ENXIO;
388 
389  /*
390  * A preparation for getting any VLPI states.
391  * The above vgic initialized check also ensures that the allocation
392  * and enabling of the doorbells have already been done.
393  */
395  unmap_all_vpes(kvm);
396  vlpi_avail = true;
397  }
398 
399  list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
400  int byte_offset, bit_nr;
401  struct kvm_vcpu *vcpu;
402  gpa_t pendbase, ptr;
403  bool is_pending;
404  bool stored;
405 
406  vcpu = irq->target_vcpu;
407  if (!vcpu)
408  continue;
409 
410  pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
411 
412  byte_offset = irq->intid / BITS_PER_BYTE;
413  bit_nr = irq->intid % BITS_PER_BYTE;
414  ptr = pendbase + byte_offset;
415 
416  if (ptr != last_ptr) {
417  ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
418  if (ret)
419  goto out;
420  last_ptr = ptr;
421  }
422 
423  stored = val & (1U << bit_nr);
424 
425  is_pending = irq->pending_latch;
426 
427  if (irq->hw && vlpi_avail)
428  vgic_v4_get_vlpi_state(irq, &is_pending);
429 
430  if (stored == is_pending)
431  continue;
432 
433  if (is_pending)
434  val |= 1 << bit_nr;
435  else
436  val &= ~(1 << bit_nr);
437 
438  ret = vgic_write_guest_lock(kvm, ptr, &val, 1);
439  if (ret)
440  goto out;
441  }
442 
443 out:
444  if (vlpi_avail)
445  map_all_vpes(kvm);
446 
447  return ret;
448 }
static void map_all_vpes(struct kvm *kvm)
Definition: vgic-v3.c:363
static void unmap_all_vpes(struct kvm *kvm)
Definition: vgic-v3.c:354
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
Definition: vgic-v4.c:213
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_set_redist_base()

int vgic_v3_set_redist_base ( struct kvm *  kvm,
u32  index,
u64  addr,
u32  count 
)

Definition at line 928 of file vgic-mmio-v3.c.

929 {
930  int ret;
931 
932  mutex_lock(&kvm->arch.config_lock);
933  ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
934  mutex_unlock(&kvm->arch.config_lock);
935  if (ret)
936  return ret;
937 
938  /*
939  * Register iodevs for each existing VCPU. Adding more VCPUs
940  * afterwards will register the iodevs when needed.
941  */
943  if (ret) {
944  struct vgic_redist_region *rdreg;
945 
946  mutex_lock(&kvm->arch.config_lock);
949  mutex_unlock(&kvm->arch.config_lock);
950  return ret;
951  }
952 
953  return 0;
954 }
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index, gpa_t base, uint32_t count)
Definition: vgic-mmio-v3.c:854
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
Definition: vgic-mmio-v3.c:812
void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
Definition: vgic-mmio-v3.c:922
struct vgic_redist_region * vgic_v3_rdist_region_from_index(struct kvm *kvm, u32 index)
Definition: vgic-v3.c:524
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v3_set_underflow()

void vgic_v3_set_underflow ( struct kvm_vcpu *  vcpu)

Definition at line 22 of file vgic-v3.c.

23 {
24  struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
25 
26  cpuif->vgic_hcr |= ICH_HCR_UIE;
27 }
Here is the caller graph for this function:

◆ vgic_v3_set_vmcr()

void vgic_v3_set_vmcr ( struct kvm_vcpu *  vcpu,
struct vgic_vmcr *  vmcrp 
)

Definition at line 194 of file vgic-v3.c.

195 {
196  struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
197  u32 model = vcpu->kvm->arch.vgic.vgic_model;
198  u32 vmcr;
199 
200  if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
201  vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
202  ICH_VMCR_ACK_CTL_MASK;
203  vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
204  ICH_VMCR_FIQ_EN_MASK;
205  } else {
206  /*
207  * When emulating GICv3 on GICv3 with SRE=1 on the
208  * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
209  */
210  vmcr = ICH_VMCR_FIQ_EN_MASK;
211  }
212 
213  vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
214  vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
215  vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
216  vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
217  vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
218  vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
219  vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
220 
221  cpu_if->vgic_vmcr = vmcr;
222 }
Here is the caller graph for this function:

◆ vgic_v3_vmcr_sync()

void vgic_v3_vmcr_sync ( struct kvm_vcpu *  vcpu)

Definition at line 740 of file vgic-v3.c.

741 {
742  struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
743 
744  if (likely(cpu_if->vgic_sre))
745  cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
746 }
u64 __vgic_v3_read_vmcr(void)
Definition: vgic-v3-sr.c:458
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v4_configure_vsgis()

void vgic_v4_configure_vsgis ( struct kvm *  kvm)

Definition at line 187 of file vgic-v4.c.

188 {
189  struct vgic_dist *dist = &kvm->arch.vgic;
190  struct kvm_vcpu *vcpu;
191  unsigned long i;
192 
193  lockdep_assert_held(&kvm->arch.config_lock);
194 
195  kvm_arm_halt_guest(kvm);
196 
197  kvm_for_each_vcpu(i, vcpu, kvm) {
198  if (dist->nassgireq)
199  vgic_v4_enable_vsgis(vcpu);
200  else
201  vgic_v4_disable_vsgis(vcpu);
202  }
203 
205 }
void kvm_arm_resume_guest(struct kvm *kvm)
Definition: arm.c:729
void kvm_arm_halt_guest(struct kvm *kvm)
Definition: arm.c:719
bool nassgireq
Definition: arm_vgic.h:259
static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:158
static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:115
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v4_get_vlpi_state()

void vgic_v4_get_vlpi_state ( struct vgic_irq *  irq,
bool *  val 
)

Definition at line 213 of file vgic-v4.c.

214 {
215  struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
216  int mask = BIT(irq->intid % BITS_PER_BYTE);
217  void *va;
218  u8 *ptr;
219 
220  va = page_address(vpe->vpt_page);
221  ptr = va + irq->intid / BITS_PER_BYTE;
222 
223  *val = !!(*ptr & mask);
224 }
Here is the caller graph for this function:

◆ vgic_v4_init()

int vgic_v4_init ( struct kvm *  kvm)

vgic_v4_init - Initialize the GICv4 data structures @kvm: Pointer to the VM being initialized

We may be called each time a vITS is created, or when the vgic is initialized. In both cases, the number of vcpus should now be fixed.

Definition at line 239 of file vgic-v4.c.

240 {
241  struct vgic_dist *dist = &kvm->arch.vgic;
242  struct kvm_vcpu *vcpu;
243  int nr_vcpus, ret;
244  unsigned long i;
245 
246  lockdep_assert_held(&kvm->arch.config_lock);
247 
249  return 0; /* Nothing to see here... move along. */
250 
251  if (dist->its_vm.vpes)
252  return 0;
253 
254  nr_vcpus = atomic_read(&kvm->online_vcpus);
255 
256  dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
257  GFP_KERNEL_ACCOUNT);
258  if (!dist->its_vm.vpes)
259  return -ENOMEM;
260 
261  dist->its_vm.nr_vpes = nr_vcpus;
262 
263  kvm_for_each_vcpu(i, vcpu, kvm)
264  dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
265 
266  ret = its_alloc_vcpu_irqs(&dist->its_vm);
267  if (ret < 0) {
268  kvm_err("VPE IRQ allocation failure\n");
269  kfree(dist->its_vm.vpes);
270  dist->its_vm.nr_vpes = 0;
271  dist->its_vm.vpes = NULL;
272  return ret;
273  }
274 
275  kvm_for_each_vcpu(i, vcpu, kvm) {
276  int irq = dist->its_vm.vpes[i]->irq;
277  unsigned long irq_flags = DB_IRQ_FLAGS;
278 
279  /*
280  * Don't automatically enable the doorbell, as we're
281  * flipping it back and forth when the vcpu gets
282  * blocked. Also disable the lazy disabling, as the
283  * doorbell could kick us out of the guest too
284  * early...
285  *
286  * On GICv4.1, the doorbell is managed in HW and must
287  * be left enabled.
288  */
290  irq_flags &= ~IRQ_NOAUTOEN;
291  irq_set_status_flags(irq, irq_flags);
292 
293  ret = vgic_v4_request_vpe_irq(vcpu, irq);
294  if (ret) {
295  kvm_err("failed to allocate vcpu IRQ%d\n", irq);
296  /*
297  * Trick: adjust the number of vpes so we know
298  * how many to nuke on teardown...
299  */
300  dist->its_vm.nr_vpes = i;
301  break;
302  }
303  }
304 
305  if (ret)
306  vgic_v4_teardown(kvm);
307 
308  return ret;
309 }
struct its_vm its_vm
Definition: arm_vgic.h:294
#define DB_IRQ_FLAGS
Definition: vgic-v4.c:82
void vgic_v4_teardown(struct kvm *kvm)
Definition: vgic-v4.c:315
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
Definition: vgic-v4.c:226
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v4_request_vpe_irq()

int vgic_v4_request_vpe_irq ( struct kvm_vcpu *  vcpu,
int  irq 
)

Definition at line 226 of file vgic-v4.c.

227 {
228  return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
229 }
static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
Definition: vgic-v4.c:84
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_v4_teardown()

void vgic_v4_teardown ( struct kvm *  kvm)

vgic_v4_teardown - Free the GICv4 data structures @kvm: Pointer to the VM being destroyed

Definition at line 315 of file vgic-v4.c.

316 {
317  struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
318  int i;
319 
320  lockdep_assert_held(&kvm->arch.config_lock);
321 
322  if (!its_vm->vpes)
323  return;
324 
325  for (i = 0; i < its_vm->nr_vpes; i++) {
326  struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
327  int irq = its_vm->vpes[i]->irq;
328 
329  irq_clear_status_flags(irq, DB_IRQ_FLAGS);
330  free_irq(irq, vcpu);
331  }
332 
333  its_free_vcpu_irqs(its_vm);
334  kfree(its_vm->vpes);
335  its_vm->nr_vpes = 0;
336  its_vm->vpes = NULL;
337 }
Here is the caller graph for this function:

◆ vgic_write_guest_lock()

static int vgic_write_guest_lock ( struct kvm *  kvm,
gpa_t  gpa,
const void *  data,
unsigned long  len 
)
inlinestatic

Definition at line 135 of file vgic.h.

137 {
138  struct vgic_dist *dist = &kvm->arch.vgic;
139  int ret;
140 
141  dist->table_write_in_progress = true;
142  ret = kvm_write_guest_lock(kvm, gpa, data, len);
143  dist->table_write_in_progress = false;
144 
145  return ret;
146 }
bool table_write_in_progress
Definition: arm_vgic.h:266
Here is the caller graph for this function: