#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"
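/*
 * GICv4 support for the KVM/arm64 vgic: direct injection of vLPIs
 * (and, with GICv4.1, vSGIs) into guests via per-vcpu vPEs and their
 * doorbell interrupts.
 */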
#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
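/*
 * The doorbell fires when a vLPI becomes pending while the vPE is not
 * resident on any CPU: note the pending state and wake the vcpu.
 */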
static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);
	return IRQ_HANDLED;
}
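/*
 * GICv4.1 vSGI handling: mirror the guest's SGI configuration into
 * the vPE so the ITS can deliver SGIs without a software model.
 */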
static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled  = irq->enabled;
	vpe->sgi_config[irq->intid].group    = irq->group;
	vpe->sgi_config[irq->intid].priority = irq->priority;
}
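/*
 * With GICv4.1, every virtual SGI can be directly injected, so
 * pretend they are HW interrupts tied to a host IRQ from the vPE's
 * SGI domain.
 */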
static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
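/* Undo the HW tie-in and pull the pending state back into the latch. */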
static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
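/*
 * Switching between the HW and SW SGI models must be done with all
 * vcpus halted, so no vcpu can observe a half-converted configuration.
 */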
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
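		/*
		 * Completion sketch of the extracted snippet: choose the
		 * per-vcpu SGI model based on the guest's nASSGIreq
		 * setting, as tracked in the distributor.
		 */
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}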
/*
 * Must be called with GICv4.1 and the vPE unmapped, which guarantees
 * the VPT caches are invalid and the VLPI state can be read directly
 * from the VPT.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}
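/*
 * Doorbell request helper: a minimal sketch reconstructed from its use
 * in vgic_v4_init() below, wiring the doorbell to the handler above.
 */
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
	return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}

/*
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
 *
 * May be called each time a vITS is created, or when the vgic is
 * initialized; in both cases, the number of vcpus is now fixed.
 */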
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_vcpus, ret;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL_ACCOUNT);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu blocks.
		 * On GICv4.1 the doorbell is managed in HW and must
		 * stay enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = vgic_v4_request_vpe_irq(vcpu, irq);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/* Remember how many vpes to nuke on teardown */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}
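/*
 * vPE residency: vgic_v4_put() makes the vPE non-resident on vcpu_put
 * (requesting the doorbell if the vcpu is in WFI), and vgic_v4_load()
 * makes it resident again on vcpu_load.
 */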
int vgic_v4_put(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
}
int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	if (vcpu_get_flag(vcpu, IN_WFI))
		return 0;

	/*
	 * Before making the vPE resident, make sure the redistributor
	 * expects us on this CPU; this turns into a VMOVP command at
	 * the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the vPE is resident, drop a potential stale doorbell.
	 * This is a GICv4.0-only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING,
					    false);

	return err;
}
void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}
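/*
 * MSI forwarding: translate a routing entry into the ITS that owns
 * its doorbell, then map/unmap the backing vLPI directly onto the
 * guest's interrupt.
 */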
static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo	= irq_entry->msi.address_lo,
		.address_hi	= irq_entry->msi.address_hi,
		.data		= irq_entry->msi.data,
		.flags		= irq_entry->msi.flags,
		.devid		= irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/* Not a valid doorbell for any of our vITSs? Bail out early. */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so bail out silently.
	 */
	map = (struct its_vlpi_map) {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= ((irq->priority & 0xfc) |
				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
				   LPI_PROP_GROUP1),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw		= true;
	irq->host_irq	= virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}