6 #include <linux/bitops.h>
7 #include <linux/bsearch.h>
8 #include <linux/interrupt.h>
10 #include <linux/kvm.h>
11 #include <linux/kvm_host.h>
20 gpa_t addr, unsigned int len)
26 gpa_t addr, unsigned int len)
32 unsigned int len, unsigned long val)
38 unsigned int len, unsigned long val)
45 gpa_t addr, unsigned int len)
52 for (i = 0; i < len * 8; i++) {
70 unsigned int len, unsigned long val)
76 for (i = 0; i < len * 8; i++) {
79 raw_spin_lock_irqsave(&irq->irq_lock, flags);
80 irq->group = !!(val & BIT(i));
83 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
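Aside: the group/enable/pending accessors above use one bit per interrupt, so an access of len bytes walks len * 8 INTIDs starting at the INTID derived from the register offset. The userspace sketch below illustrates only that arithmetic; the helper name offset_to_intid and the sample values are illustrative, not taken from the kernel.

#include <stdio.h>

/* Sketch: map a byte offset within a 1-bit-per-IRQ register block
 * (e.g. GICD_ISENABLER) to the first interrupt ID it covers, then walk
 * the bits of the written value the way the vgic accessors do. */
static unsigned int offset_to_intid(unsigned int offset, unsigned int bits_per_irq)
{
    return (offset * 8) / bits_per_irq;    /* 8 bits per byte */
}

int main(void)
{
    unsigned int offset = 0x4;          /* second 32-bit register in the block */
    unsigned int len = 4;               /* 32-bit access */
    unsigned long val = 0x5;            /* bits 0 and 2 set by the guest */
    unsigned int intid = offset_to_intid(offset, 1);
    unsigned int i;

    for (i = 0; i < len * 8; i++) {
        if (val & (1UL << i))
            printf("bit %u -> INTID %u\n", i, intid + i);
    }
    return 0;
}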
97 gpa_t addr, unsigned int len)
104 for (i = 0; i < len * 8; i++) {
117 gpa_t addr, unsigned int len,
124 for_each_set_bit(i, &val, len * 8) {
127 raw_spin_lock_irqsave(&irq->irq_lock, flags);
130 struct irq_data *data;
133 data = &irq_to_desc(irq->host_irq)->irq_data;
134 while (irqd_irq_disabled(data))
138 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
166 gpa_t addr, unsigned int len,
173 for_each_set_bit(i, &val, len * 8) {
176 raw_spin_lock_irqsave(&irq->irq_lock, flags);
182 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
188 gpa_t addr, unsigned int len,
195 for_each_set_bit(i, &val, len * 8) {
198 raw_spin_lock_irqsave(&irq->irq_lock, flags);
209 gpa_t addr, unsigned int len,
216 for_each_set_bit(i, &val, len * 8) {
219 raw_spin_lock_irqsave(&irq->irq_lock, flags);
221 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
230 gpa_t addr, unsigned int len,
238 for (i = 0; i < len * 8; i++) {
252 raw_spin_lock_irqsave(&irq->irq_lock, flags);
257 err = irq_get_irqchip_state(irq->host_irq,
258 IRQCHIP_STATE_PENDING,
260 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
264 switch (vcpu->kvm->arch.vgic.vgic_model) {
265 case KVM_DEV_TYPE_ARM_VGIC_V3:
277 value |= ((u32)val << i);
278 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
287 gpa_t addr, unsigned int len)
293 gpa_t addr, unsigned int len)
301 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
305 unsigned long val, bool is_user)
311 for_each_set_bit(i, &val, len * 8) {
320 raw_spin_lock_irqsave(&irq->irq_lock, flags);
333 err = irq_set_irqchip_state(irq->host_irq,
334 IRQCHIP_STATE_PENDING,
336 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
338 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
345 if (irq->hw && !is_user)
354 gpa_t addr, unsigned int len,
361 gpa_t addr, unsigned int len,
390 gpa_t addr, unsigned int len,
391 unsigned long val, bool is_user)
397 for_each_set_bit(i, &val, len * 8) {
406 raw_spin_lock_irqsave(&irq->irq_lock, flags);
419 err = irq_set_irqchip_state(irq->host_irq,
420 IRQCHIP_STATE_PENDING,
422 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
424 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
430 if (irq->hw && !is_user)
435 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
441 gpa_t addr, unsigned int len,
448 gpa_t addr, unsigned int len,
473 if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
482 if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
489 gpa_t addr, unsigned int len)
496 for (i = 0; i < len * 8; i++) {
513 gpa_t addr, unsigned int len)
518 mutex_lock(&vcpu->kvm->arch.config_lock);
524 mutex_unlock(&vcpu->kvm->arch.config_lock);
530 gpa_t addr, unsigned int len)
537 bool active, bool is_uaccess)
552 raw_spin_lock_irqsave(&irq->irq_lock, flags);
564 u32 model = vcpu->kvm->arch.vgic.vgic_model;
580 active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
582 if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
590 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
594 gpa_t addr, unsigned int len,
600 for_each_set_bit(i, &val, len * 8) {
608 gpa_t addr, unsigned int len,
613 mutex_lock(&vcpu->kvm->arch.config_lock);
619 mutex_unlock(&vcpu->kvm->arch.config_lock);
623 gpa_t addr, unsigned int len,
631 gpa_t addr, unsigned int len,
637 for_each_set_bit(i, &val, len * 8) {
645 gpa_t addr, unsigned int len,
650 mutex_lock(&vcpu->kvm->arch.config_lock);
656 mutex_unlock(&vcpu->kvm->arch.config_lock);
660 gpa_t addr, unsigned int len,
668 gpa_t addr, unsigned int len)
674 for (i = 0; i < len; i++) {
677 val |= (u64)irq->priority << (i * 8);
693 gpa_t addr, unsigned int len,
700 for (i = 0; i < len; i++) {
703 raw_spin_lock_irqsave(&irq->irq_lock, flags);
708 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
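Aside: the priority registers hold one byte per interrupt, which is why the read/write handlers above loop over len rather than len * 8. A userspace sketch of the byte extraction; the 5-bit implemented priority width and the sample values are assumptions for illustration.

#include <stdio.h>

/* Sketch: GICD_IPRIORITYR carries one priority byte per interrupt, so an
 * access of "len" bytes touches "len" INTIDs.  Only the upper priority
 * bits are implemented; PRI_BITS below is an illustrative value. */
#define PRI_BITS    5

int main(void)
{
    unsigned int len = 4;               /* 32-bit access -> 4 interrupts */
    unsigned long val = 0xa0804020;     /* one priority byte per IRQ */
    unsigned int first_intid = 32;      /* would come from the offset */
    unsigned int i;

    for (i = 0; i < len; i++) {
        unsigned int prio = (val >> (i * 8)) & 0xff;

        /* keep only the implemented upper bits, as the GIC does */
        prio &= ~((1U << (8 - PRI_BITS)) - 1) & 0xff;
        printf("INTID %u -> priority 0x%02x\n", first_intid + i, prio);
    }
    return 0;
}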
715 gpa_t addr, unsigned int len)
721 for (i = 0; i < len * 4; i++) {
725 value |= (2U << (i * 2));
734 gpa_t addr, unsigned int len,
741 for (i = 0; i < len * 4; i++) {
754 raw_spin_lock_irqsave(&irq->irq_lock, flags);
756 if (test_bit(i * 2 + 1, &val))
761 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
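Aside: the configuration registers use two bits per interrupt, so one access covers len * 4 INTIDs, and bit (i * 2 + 1) selects edge (1) versus level (0) triggering; the read side above reports 2 in that field for edge-triggered interrupts. A userspace sketch of the decoding, with made-up sample values.

#include <stdio.h>
#include <stdbool.h>

/* Sketch: GICD_ICFGR packs two configuration bits per interrupt, so
 * "len" bytes cover len * 4 INTIDs; the upper bit of each field picks
 * edge (1) or level (0) triggering. */
int main(void)
{
    unsigned int len = 4;               /* 32-bit access -> 16 interrupts */
    unsigned long val = 0x8;            /* field 1 = 0b10 -> edge */
    unsigned int first_intid = 32;      /* would come from the offset */
    unsigned int i;

    for (i = 0; i < len * 4; i++) {
        bool edge = val & (1UL << (i * 2 + 1));

        printf("INTID %u -> %s-triggered\n",
               first_intid + i, edge ? "edge" : "level");
    }
    return 0;
}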
772 for (i = 0; i < 32; i++) {
795 for (i = 0; i < 32; i++) {
809 new_level = !!(val & (1U << i));
810 raw_spin_lock_irqsave(&irq->irq_lock, flags);
815 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
823 const unsigned int offset = (unsigned long)key;
837 int nr_regions, unsigned int offset)
839 return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
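Aside: the region lookup passes the accessed offset through bsearch() over a table sorted by register offset, with a comparator that reports whether the offset falls inside a region. The userspace sketch below shows the same idea with a cut-down region structure; it is not the kernel's vgic_register_region.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Cut-down stand-in for a register region: start offset and length. */
struct region {
    unsigned int reg_offset;
    unsigned int len;
};

/* Comparator: the "key" is the accessed offset smuggled in as a pointer
 * value, the "elt" is a region; return <0/0/>0 as bsearch() expects. */
static int match_region(const void *key, const void *elt)
{
    const unsigned int offset = (unsigned long)key;
    const struct region *region = elt;

    if (offset < region->reg_offset)
        return -1;
    if (offset >= region->reg_offset + region->len)
        return 1;
    return 0;
}

int main(void)
{
    /* sorted by reg_offset, as the lookup requires */
    static const struct region regions[] = {
        { 0x000, 4 }, { 0x080, 4 }, { 0x100, 0x80 },
    };
    unsigned int offset = 0x104;
    const struct region *r;

    r = bsearch((void *)(uintptr_t)offset, regions,
                sizeof(regions) / sizeof(regions[0]), sizeof(regions[0]),
                match_region);
    if (r)
        printf("offset 0x%x -> region starting at 0x%x\n",
               offset, r->reg_offset);
    return 0;
}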
876 return le16_to_cpu(data);
878 return le32_to_cpu(data);
880 return le64_to_cpu(data);
900 data = cpu_to_le16(data);
903 data = cpu_to_le32(data);
906 data = cpu_to_le64(data);
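Aside: guest MMIO data travels as a little-endian byte buffer of 1, 2, 4 or 8 bytes and is widened to an unsigned long in host byte order, which is what the le*_to_cpu()/cpu_to_le*() calls above implement. A userspace analogue, with glibc's le16toh()/le32toh()/le64toh() standing in for the kernel helpers.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>

/* Sketch of the bus-to-host direction: copy the (possibly unaligned)
 * buffer into a fixed-width integer and byte-swap from little endian. */
static unsigned long mmio_bus_to_host(const void *buf, unsigned int len)
{
    switch (len) {
    case 1: {
        uint8_t data;
        memcpy(&data, buf, sizeof(data));
        return data;
    }
    case 2: {
        uint16_t data;
        memcpy(&data, buf, sizeof(data));
        return le16toh(data);
    }
    case 4: {
        uint32_t data;
        memcpy(&data, buf, sizeof(data));
        return le32toh(data);
    }
    default: {
        uint64_t data;
        memcpy(&data, buf, sizeof(data));
        return le64toh(data);
    }
    }
}

int main(void)
{
    unsigned char buf[4] = { 0x78, 0x56, 0x34, 0x12 };  /* LE 0x12345678 */

    printf("host value: 0x%lx\n", mmio_bus_to_host(buf, sizeof(buf)));
    return 0;
}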
938 if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
964 gpa_t addr, u32 *val)
967 struct kvm_vcpu *r_vcpu;
979 *val = region->read(r_vcpu, addr, sizeof(u32));
985 gpa_t addr, const u32 *val)
988 struct kvm_vcpu *r_vcpu;
996 return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
998 region->write(r_vcpu, addr, sizeof(u32), *val);
1006 bool is_write, int offset, u32 *val)
1015 gpa_t addr, int len, void *val)
1019 unsigned long data = 0;
1023 memset(val, 0, len);
1029 data = region->read(vcpu, addr, len);
1032 data = region->read(vcpu, addr, len);
1047 gpa_t addr, int len, const void *val)
1059 region->write(vcpu, addr, len, data);
1062 region->write(vcpu, addr, len, data);
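Aside: the dispatch functions only decode the access and then forward it through the region's read/write callbacks, so trivial handlers such as read-as-zero and write-ignore plug into the same table. The userspace sketch below shows that indirection; struct vcpu and register_region are illustrative stand-ins, not the kernel's types.

#include <stdio.h>
#include <stddef.h>

struct vcpu;    /* opaque stand-in for the real vCPU type */

/* Cut-down stand-in for a register region with its access callbacks. */
struct register_region {
    unsigned int reg_offset;
    unsigned long (*read)(struct vcpu *vcpu, unsigned long addr,
                          unsigned int len);
    void (*write)(struct vcpu *vcpu, unsigned long addr, unsigned int len,
                  unsigned long val);
};

/* Read-as-zero handler: every read returns 0. */
static unsigned long read_raz(struct vcpu *vcpu, unsigned long addr,
                              unsigned int len)
{
    (void)vcpu; (void)addr; (void)len;
    return 0;
}

/* Write-ignore handler: writes are silently dropped. */
static void write_wi(struct vcpu *vcpu, unsigned long addr, unsigned int len,
                     unsigned long val)
{
    (void)vcpu; (void)addr; (void)len; (void)val;
}

int main(void)
{
    struct register_region region = { 0x0, read_raz, write_wi };

    region.write(NULL, 0x0, 4, 0xffffffff);     /* ignored */
    printf("read back: 0x%lx\n", region.read(NULL, 0x0, 4));
    return 0;
}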
1097 io_device->base_addr = dist_base_address;
1102 len, &io_device->dev);
void kvm_arm_resume_guest(struct kvm *kvm)
void kvm_arm_halt_guest(struct kvm *kvm)
struct vgic_global kvm_vgic_global_state
#define VGIC_NR_PRIVATE_IRQS
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len, struct kvm_io_device *dev)
struct kvm_vcpu * kvm_get_running_vcpu(void)
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
int(* read)(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr, int len, void *val)
struct kvm_vcpu * redist_vcpu
const struct vgic_register_region * regions
enum iodev_type iodev_type
enum vgic_irq_config config
unsigned long(* read)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
unsigned int access_flags
unsigned long(* uaccess_read)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
void(* write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
unsigned long(* its_read)(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len)
int(* uaccess_write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
void(* its_write)(struct kvm *kvm, struct vgic_its *its, gpa_t addr, unsigned int len, unsigned long val)
unsigned int bits_per_irq
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool active, bool is_uaccess)
int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, const u32 val)
static bool check_region(const struct kvm *kvm, const struct vgic_register_region *region, gpa_t addr, int len)
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
const struct vgic_register_region * vgic_find_mmio_region(const struct vgic_register_region *regions, int nr_regions, unsigned int offset)
unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
static void vgic_update_vsgi(struct vgic_irq *irq)
int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address, enum vgic_type type)
void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val)
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, const u32 *val)
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len, unsigned long data)
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val)
static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val, bool is_user)
void vgic_mmio_write_config(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static int match_region(const void *key, const void *elt)
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, u32 *val)
int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
static void __clear_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val, bool is_user)
static unsigned long __read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, bool is_user)
int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool active)
static struct vgic_io_device * kvm_to_vgic_iodev(const struct kvm_io_device *dev)
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev, bool is_write, int offset, u32 *val)
const struct kvm_io_device_ops kvm_io_gic_ops
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val)
unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len)
const struct vgic_register_region * vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, gpa_t addr, int len)
#define VGIC_ACCESS_64bit
#define VGIC_ADDR_TO_INTID(addr, bits)
#define VGIC_ACCESS_32bit
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, unsigned long flags)
bool vgic_get_phys_line_level(struct vgic_irq *irq)
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
struct vgic_irq * vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
static bool irq_is_pending(struct vgic_irq *irq)
#define vgic_irq_is_sgi(intid)
static bool vgic_irq_is_mapped_level(struct vgic_irq *irq)