#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqdomain.h>
#include <linux/kstrtox.h>
#include <linux/kvm_host.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
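/*
 * True when an LR has been emptied (no pending/active state left), has
 * its EOI maintenance bit set, and is not mapped to a HW interrupt:
 * the guest just EOI'ed a purely virtual level interrupt.
 */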
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}
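/*
 * From vgic_v3_fold_lr_state(): after a guest exit, fold the state the
 * hardware left in the list registers back into the software model of
 * each interrupt.
 */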
	u32 model = vcpu->kvm->arch.vgic.vgic_model;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}
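/*
 * From vgic_v3_populate_lr(): build the LR value for one interrupt
 * before entering the guest. Pending+active is never allowed for HW
 * interrupts or for interrupts using software resampling.
 */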
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else if (irq->config == VGIC_CONFIG_LEVEL) {
		val |= ICH_LR_EOI;

		/* Software resampling doesn't work well with P+A. */
		if (irq->active)
			allow_pending = false;
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
		}
	}

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
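/*
 * From vgic_v3_set_vmcr(): pack the generic vgic_vmcr fields into the
 * ICH_VMCR_EL2 layout. AckCtl and FIQEn only exist for GICv2 guests;
 * on GICv3 VFIQEn is RES1 and VAckCtl is RES0.
 */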
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
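/*
 * From vgic_v3_get_vmcr(): the inverse of vgic_v3_set_vmcr(), unpacking
 * ICH_VMCR_EL2 into the generic vgic_vmcr representation.
 */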
	u32 model = vcpu->kvm->arch.vgic.vgic_model;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
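/*
 * Default GICR_PENDBASER attributes: inner cacheable (RaWb), outer
 * cacheability following inner, inner shareable.
 */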
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
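/*
 * From vgic_v3_enable(): a GICv3 guest runs with ICC_SRE_EL1.SRE set
 * (system register CPU interface); the supported INTID and priority
 * widths are derived from ICH_VTR_EL2.
 */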
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;
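/*
 * From vgic_v3_lpi_sync_pending_status(): read the pending-table bit
 * for one LPI from guest memory, latch it into the software model, and
 * clear the consumed bit.
 */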
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}

	/* clear consumed data */
	val &= ~(1 << bit_nr);
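/*
 * Helpers for vgic_v3_save_pending_tables(): on GICv4.1 the vPE
 * doorbell interrupts are freed before reading VLPI state and
 * re-requested afterwards.
 */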
static void unmap_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}
static void map_all_vpes(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++)
		WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
						dist->its_vm.vpes[i]->irq));
}
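/*
 * From vgic_v3_save_pending_tables(): write the pending state of every
 * LPI to the guest pending tables, caching the last byte read to avoid
 * redundant guest accesses.
 */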
	struct vgic_dist *dist = &kvm->arch.vgic;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);
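/*
 * From vgic_v3_check_base(): every redistributor region must pass the
 * iorange check, and the distributor frame must not overlap any
 * redistributor region.
 */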
		if (!vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
					rdreg->base, SZ_64K, sz))
			return false;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
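/*
 * From vgic_v3_rdist_free_slot() and vgic_v3_rdist_region_from_index():
 * both walk the kvm->arch.vgic.rd_regions list of redistributor regions.
 */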
	list_for_each_entry(rdreg, rd_regions, list) {
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;

	list_for_each_entry(rdreg, rd_regions, list) {
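/*
 * From vgic_v3_map_resources(): refuse to run the guest until userspace
 * has set the distributor and all redistributor base addresses, and the
 * resulting layout is consistent.
 */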
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (IS_VGIC_ADDR_UNDEF(vcpu->arch.vgic_cpu.rd_iodev.base_addr)) {
			kvm_debug("vcpu %ld redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -ENXIO;
	}
static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
	{},
};

static bool vgic_v3_broken_seis(void)
{
	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
		is_midr_in_range_list(read_cpuid_id(), broken_seis));
}
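/*
 * From vgic_v3_probe(): bit 63 of the value returned by
 * __vgic_v3_get_gic_config() indicates whether the CPU interface is
 * GICv2 (MMIO) capable; the low bits hold ICH_VTR_EL2.
 */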
	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	kvm_info("GICv4%s support %sabled\n",
		 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
		 gicv4_enable ? "en" : "dis");

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else {
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (vgic_v3_broken_seis()) {
		kvm_info("GICv3 with broken locally generated SEI\n");

		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
		 group0_trap ? "G0" : "",
		 group1_trap ? "G1" : "",
		 common_trap ? "C"  : "",
		 dir_trap    ? "D"  : "");