KVM
Macros | Functions | Variables
vgic.c File Reference
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>
#include <asm/kvm_hyp.h>
#include "vgic.h"
#include "trace.h"
Include dependency graph for vgic.c:

Go to the source code of this file.

Macros

#define CREATE_TRACE_POINTS
 

Functions

static struct vgic_irq * vgic_get_lpi (struct kvm *kvm, u32 intid)
 
struct vgic_irq * vgic_get_irq (struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
 
static void vgic_irq_release (struct kref *ref)
 
void __vgic_put_lpi_locked (struct kvm *kvm, struct vgic_irq *irq)
 
void vgic_put_irq (struct kvm *kvm, struct vgic_irq *irq)
 
void vgic_flush_pending_lpis (struct kvm_vcpu *vcpu)
 
void vgic_irq_set_phys_pending (struct vgic_irq *irq, bool pending)
 
bool vgic_get_phys_line_level (struct vgic_irq *irq)
 
void vgic_irq_set_phys_active (struct vgic_irq *irq, bool active)
 
static struct kvm_vcpu * vgic_target_oracle (struct vgic_irq *irq)
 
static int vgic_irq_cmp (void *priv, const struct list_head *a, const struct list_head *b)
 
static void vgic_sort_ap_list (struct kvm_vcpu *vcpu)
 
static bool vgic_validate_injection (struct vgic_irq *irq, bool level, void *owner)
 
bool vgic_queue_irq_unlock (struct kvm *kvm, struct vgic_irq *irq, unsigned long flags)
 
int kvm_vgic_inject_irq (struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned int intid, bool level, void *owner)
 
static int kvm_vgic_map_irq (struct kvm_vcpu *vcpu, struct vgic_irq *irq, unsigned int host_irq, struct irq_ops *ops)
 
static void kvm_vgic_unmap_irq (struct vgic_irq *irq)
 
int kvm_vgic_map_phys_irq (struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops)
 
void kvm_vgic_reset_mapped_irq (struct kvm_vcpu *vcpu, u32 vintid)
 
int kvm_vgic_unmap_phys_irq (struct kvm_vcpu *vcpu, unsigned int vintid)
 
int kvm_vgic_get_map (struct kvm_vcpu *vcpu, unsigned int vintid)
 
int kvm_vgic_set_owner (struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 
static void vgic_prune_ap_list (struct kvm_vcpu *vcpu)
 
static void vgic_fold_lr_state (struct kvm_vcpu *vcpu)
 
static void vgic_populate_lr (struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 
static void vgic_clear_lr (struct kvm_vcpu *vcpu, int lr)
 
static void vgic_set_underflow (struct kvm_vcpu *vcpu)
 
static int compute_ap_list_depth (struct kvm_vcpu *vcpu, bool *multi_sgi)
 
static void vgic_flush_lr_state (struct kvm_vcpu *vcpu)
 
static bool can_access_vgic_from_kernel (void)
 
static void vgic_save_state (struct kvm_vcpu *vcpu)
 
void kvm_vgic_sync_hwstate (struct kvm_vcpu *vcpu)
 
static void vgic_restore_state (struct kvm_vcpu *vcpu)
 
void kvm_vgic_flush_hwstate (struct kvm_vcpu *vcpu)
 
void kvm_vgic_load (struct kvm_vcpu *vcpu)
 
void kvm_vgic_put (struct kvm_vcpu *vcpu)
 
void kvm_vgic_vmcr_sync (struct kvm_vcpu *vcpu)
 
int kvm_vgic_vcpu_pending_irq (struct kvm_vcpu *vcpu)
 
void vgic_kick_vcpus (struct kvm *kvm)
 
bool kvm_vgic_map_is_active (struct kvm_vcpu *vcpu, unsigned int vintid)
 
void vgic_irq_handle_resampling (struct vgic_irq *irq, bool lr_deactivated, bool lr_pending)
 

Variables

struct vgic_global kvm_vgic_global_state __ro_after_init
 

Macro Definition Documentation

◆ CREATE_TRACE_POINTS

#define CREATE_TRACE_POINTS

Definition at line 17 of file vgic.c.

Function Documentation

◆ __vgic_put_lpi_locked()

void __vgic_put_lpi_locked ( struct kvm *  kvm,
struct vgic_irq *  irq 
)

Definition at line 126 of file vgic.c.

127 {
128  struct vgic_dist *dist = &kvm->arch.vgic;
129 
130  if (!kref_put(&irq->refcount, vgic_irq_release))
131  return;
132 
133  list_del(&irq->lpi_list);
134  dist->lpi_list_count--;
135 
136  kfree(irq);
137 }
int lpi_list_count
Definition: arm_vgic.h:279
struct list_head lpi_list
Definition: arm_vgic.h:119
struct kref refcount
Definition: arm_vgic.h:141
static void vgic_irq_release(struct kref *ref)
Definition: vgic.c:119
Here is the call graph for this function:
Here is the caller graph for this function:

◆ can_access_vgic_from_kernel()

static bool can_access_vgic_from_kernel ( void  )
inlinestatic

Definition at line 856 of file vgic.c.

857 {
858  /*
859  * GICv2 can always be accessed from the kernel because it is
860  * memory-mapped, and VHE systems can access GICv3 EL2 system
861  * registers.
862  */
863  return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
864 }
struct vgic_global kvm_vgic_global_state
struct static_key_false gicv3_cpuif
Definition: arm_vgic.h:81
Here is the caller graph for this function:

◆ compute_ap_list_depth()

static int compute_ap_list_depth ( struct kvm_vcpu *  vcpu,
bool *  multi_sgi 
)
static

Definition at line 771 of file vgic.c.

773 {
774  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
775  struct vgic_irq *irq;
776  int count = 0;
777 
778  *multi_sgi = false;
779 
780  lockdep_assert_held(&vgic_cpu->ap_list_lock);
781 
782  list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
783  int w;
784 
785  raw_spin_lock(&irq->irq_lock);
786  /* GICv2 SGIs can count for more than one... */
787  w = vgic_irq_get_lr_count(irq);
788  raw_spin_unlock(&irq->irq_lock);
789 
790  count += w;
791  *multi_sgi |= (w > 1);
792  }
793  return count;
794 }
raw_spinlock_t ap_list_lock
Definition: arm_vgic.h:334
struct list_head ap_list_head
Definition: arm_vgic.h:342
struct list_head ap_list
Definition: arm_vgic.h:120
raw_spinlock_t irq_lock
Definition: arm_vgic.h:118
static int vgic_irq_get_lr_count(struct vgic_irq *irq)
Definition: vgic.h:121
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_flush_hwstate()

void kvm_vgic_flush_hwstate ( struct kvm_vcpu *  vcpu)

Definition at line 905 of file vgic.c.

906 {
907  /*
908  * If there are no virtual interrupts active or pending for this
909  * VCPU, then there is no work to do and we can bail out without
910  * taking any lock. There is a potential race with someone injecting
911  * interrupts to the VCPU, but it is a benign race as the VCPU will
912  * either observe the new interrupt before or after doing this check,
913  * and introducing additional synchronization mechanism doesn't change
914  * this.
915  *
916  * Note that we still need to go through the whole thing if anything
917  * can be directly injected (GICv4).
918  */
919  if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
920  !vgic_supports_direct_msis(vcpu->kvm))
921  return;
922 
923  DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
924 
925  if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
926  raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
927  vgic_flush_lr_state(vcpu);
928  raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
929  }
930 
 931  if (can_access_vgic_from_kernel())
 932  vgic_restore_state(vcpu);
933 
934  if (vgic_supports_direct_msis(vcpu->kvm))
935  vgic_v4_commit(vcpu);
936 }
bool vgic_supports_direct_msis(struct kvm *kvm)
Definition: vgic-mmio-v3.c:51
void vgic_v4_commit(struct kvm_vcpu *vcpu)
Definition: vgic-v4.c:385
static void vgic_restore_state(struct kvm_vcpu *vcpu)
Definition: vgic.c:896
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
Definition: vgic.c:797
static bool can_access_vgic_from_kernel(void)
Definition: vgic.c:856
#define DEBUG_SPINLOCK_BUG_ON(p)
Definition: vgic.h:99
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_get_map()

int kvm_vgic_get_map ( struct kvm_vcpu *  vcpu,
unsigned int  vintid 
)

Definition at line 576 of file vgic.c.

577 {
578  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
579  unsigned long flags;
580  int ret = -1;
581 
582  raw_spin_lock_irqsave(&irq->irq_lock, flags);
583  if (irq->hw)
584  ret = irq->hwintid;
585  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
586 
587  vgic_put_irq(vcpu->kvm, irq);
588  return ret;
589 }
struct kvm_vcpu * vcpu
Definition: arm_vgic.h:122
bool hw
Definition: arm_vgic.h:140
u32 hwintid
Definition: arm_vgic.h:142
struct vgic_irq * vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
Definition: vgic.c:92
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
Definition: vgic.c:139
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_inject_irq()

int kvm_vgic_inject_irq ( struct kvm *  kvm,
struct kvm_vcpu *  vcpu,
unsigned int  intid,
bool  level,
void *  owner 
)

kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic @kvm: The VM structure pointer @vcpu: The CPU for PPIs or NULL for global interrupts @intid: The INTID to inject a new state to. @level: Edge-triggered: true: to trigger the interrupt false: to ignore the call Level-sensitive true: raise the input signal false: lower the input signal @owner: The opaque pointer to the owner of the IRQ being raised to verify that the caller is allowed to inject this IRQ. Userspace injections will have owner == NULL.

The VGIC is not concerned with devices being active-LOW or active-HIGH for level-sensitive interrupts. You can think of the level parameter as 1 being HIGH and 0 being LOW and all devices being active-HIGH.

Definition at line 439 of file vgic.c.

441 {
442  struct vgic_irq *irq;
443  unsigned long flags;
444  int ret;
445 
446  ret = vgic_lazy_init(kvm);
447  if (ret)
448  return ret;
449 
450  if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
451  return -EINVAL;
452 
453  trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);
454 
455  irq = vgic_get_irq(kvm, vcpu, intid);
456  if (!irq)
457  return -EINVAL;
458 
459  raw_spin_lock_irqsave(&irq->irq_lock, flags);
460 
461  if (!vgic_validate_injection(irq, level, owner)) {
462  /* Nothing to see here, move along... */
463  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
464  vgic_put_irq(kvm, irq);
465  return 0;
466  }
467 
468  if (irq->config == VGIC_CONFIG_LEVEL)
469  irq->line_level = level;
470  else
471  irq->pending_latch = true;
472 
473  vgic_queue_irq_unlock(kvm, irq, flags);
474  vgic_put_irq(kvm, irq);
475 
476  return 0;
477 }
@ VGIC_CONFIG_LEVEL
Definition: arm_vgic.h:94
#define VGIC_NR_PRIVATE_IRQS
Definition: arm_vgic.h:27
u32 intid
Definition: arm_vgic.h:133
void * owner
Definition: arm_vgic.h:156
bool line_level
Definition: arm_vgic.h:134
bool pending_latch
Definition: arm_vgic.h:135
enum vgic_irq_config config
Definition: arm_vgic.h:152
int vgic_lazy_init(struct kvm *kvm)
Definition: vgic-init.c:423
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
Definition: vgic.c:313
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, unsigned long flags)
Definition: vgic.c:336
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_load()

void kvm_vgic_load ( struct kvm_vcpu *  vcpu)

Definition at line 938 of file vgic.c.

939 {
940  if (unlikely(!vgic_initialized(vcpu->kvm)))
941  return;
942 
 943  if (kvm_vgic_global_state.type == VGIC_V2)
 944  vgic_v2_load(vcpu);
945  else
946  vgic_v3_load(vcpu);
947 }
@ VGIC_V2
Definition: arm_vgic.h:39
#define vgic_initialized(k)
Definition: arm_vgic.h:393
enum vgic_type type
Definition: arm_vgic.h:46
void vgic_v2_load(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:457
void vgic_v3_load(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:720
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_map_irq()

static int kvm_vgic_map_irq ( struct kvm_vcpu *  vcpu,
struct vgic_irq *  irq,
unsigned int  host_irq,
struct irq_ops *  ops 
)
static

Definition at line 480 of file vgic.c.

483 {
484  struct irq_desc *desc;
485  struct irq_data *data;
486 
487  /*
488  * Find the physical IRQ number corresponding to @host_irq
489  */
490  desc = irq_to_desc(host_irq);
491  if (!desc) {
492  kvm_err("%s: no interrupt descriptor\n", __func__);
493  return -EINVAL;
494  }
495  data = irq_desc_get_irq_data(desc);
496  while (data->parent_data)
497  data = data->parent_data;
498 
499  irq->hw = true;
500  irq->host_irq = host_irq;
501  irq->hwintid = data->hwirq;
502  irq->ops = ops;
503  return 0;
504 }
struct irq_ops * ops
Definition: arm_vgic.h:154
unsigned int host_irq
Definition: arm_vgic.h:143
Here is the caller graph for this function:

◆ kvm_vgic_map_is_active()

bool kvm_vgic_map_is_active ( struct kvm_vcpu *  vcpu,
unsigned int  vintid 
)

Definition at line 1022 of file vgic.c.

1023 {
1024  struct vgic_irq *irq;
1025  bool map_is_active;
1026  unsigned long flags;
1027 
1028  if (!vgic_initialized(vcpu->kvm))
1029  return false;
1030 
1031  irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
1032  raw_spin_lock_irqsave(&irq->irq_lock, flags);
1033  map_is_active = irq->hw && irq->active;
1034  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
1035  vgic_put_irq(vcpu->kvm, irq);
1036 
1037  return map_is_active;
1038 }
bool active
Definition: arm_vgic.h:138
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_map_phys_irq()

int kvm_vgic_map_phys_irq ( struct kvm_vcpu *  vcpu,
unsigned int  host_irq,
u32  vintid,
struct irq_ops *  ops 
)

Definition at line 514 of file vgic.c.

516 {
517  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
518  unsigned long flags;
519  int ret;
520 
521  BUG_ON(!irq);
522 
523  raw_spin_lock_irqsave(&irq->irq_lock, flags);
524  ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
525  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
526  vgic_put_irq(vcpu->kvm, irq);
527 
528  return ret;
529 }
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, unsigned int host_irq, struct irq_ops *ops)
Definition: vgic.c:480
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_put()

void kvm_vgic_put ( struct kvm_vcpu *  vcpu)

Definition at line 949 of file vgic.c.

950 {
951  if (unlikely(!vgic_initialized(vcpu->kvm)))
952  return;
953 
 954  if (kvm_vgic_global_state.type == VGIC_V2)
 955  vgic_v2_put(vcpu);
956  else
957  vgic_v3_put(vcpu);
958 }
void vgic_v2_put(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:474
void vgic_v3_put(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:748
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_reset_mapped_irq()

void kvm_vgic_reset_mapped_irq ( struct kvm_vcpu *  vcpu,
u32  vintid 
)

kvm_vgic_reset_mapped_irq - Reset a mapped IRQ @vcpu: The VCPU pointer @vintid: The INTID of the interrupt

Reset the active and pending states of a mapped interrupt. Kernel subsystems injecting mapped interrupts should reset their interrupt lines when we are doing a reset of the VM.

Definition at line 540 of file vgic.c.

541 {
542  struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
543  unsigned long flags;
544 
545  if (!irq->hw)
546  goto out;
547 
548  raw_spin_lock_irqsave(&irq->irq_lock, flags);
549  irq->active = false;
550  irq->pending_latch = false;
551  irq->line_level = false;
552  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
553 out:
554  vgic_put_irq(vcpu->kvm, irq);
555 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_set_owner()

int kvm_vgic_set_owner ( struct kvm_vcpu *  vcpu,
unsigned int  intid,
void *  owner 
)

kvm_vgic_set_owner - Set the owner of an interrupt for a VM

@vcpu: Pointer to the VCPU (used for PPIs) @intid: The virtual INTID identifying the interrupt (PPI or SPI) @owner: Opaque pointer to the owner

Returns 0 if intid is not already used by another in-kernel device and the owner is set, otherwise returns an error code.

Definition at line 601 of file vgic.c.

602 {
603  struct vgic_irq *irq;
604  unsigned long flags;
605  int ret = 0;
606 
607  if (!vgic_initialized(vcpu->kvm))
608  return -EAGAIN;
609 
610  /* SGIs and LPIs cannot be wired up to any device */
611  if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
612  return -EINVAL;
613 
614  irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
615  raw_spin_lock_irqsave(&irq->irq_lock, flags);
616  if (irq->owner && irq->owner != owner)
617  ret = -EEXIST;
618  else
619  irq->owner = owner;
620  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
621 
622  return ret;
623 }
#define irq_is_ppi(irq)
Definition: arm_vgic.h:34
#define vgic_valid_spi(k, i)
Definition: arm_vgic.h:395
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_sync_hwstate()

void kvm_vgic_sync_hwstate ( struct kvm_vcpu *  vcpu)

Definition at line 875 of file vgic.c.

876 {
877  int used_lrs;
878 
879  /* An empty ap_list_head implies used_lrs == 0 */
880  if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
881  return;
882 
 883  if (can_access_vgic_from_kernel())
 884  vgic_save_state(vcpu);
885 
886  if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
887  used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
888  else
889  used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
890 
891  if (used_lrs)
892  vgic_fold_lr_state(vcpu);
893  vgic_prune_ap_list(vcpu);
894 }
static void vgic_save_state(struct kvm_vcpu *vcpu)
Definition: vgic.c:866
static void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
Definition: vgic.c:734
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
Definition: vgic.c:633
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_unmap_irq()

static void kvm_vgic_unmap_irq ( struct vgic_irq *  irq)
inlinestatic

Definition at line 507 of file vgic.c.

508 {
509  irq->hw = false;
510  irq->hwintid = 0;
511  irq->ops = NULL;
512 }
Here is the caller graph for this function:

◆ kvm_vgic_unmap_phys_irq()

int kvm_vgic_unmap_phys_irq ( struct kvm_vcpu *  vcpu,
unsigned int  vintid 
)

Definition at line 557 of file vgic.c.

558 {
559  struct vgic_irq *irq;
560  unsigned long flags;
561 
562  if (!vgic_initialized(vcpu->kvm))
563  return -EAGAIN;
564 
565  irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
566  BUG_ON(!irq);
567 
568  raw_spin_lock_irqsave(&irq->irq_lock, flags);
569  kvm_vgic_unmap_irq(irq);
570  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
571  vgic_put_irq(vcpu->kvm, irq);
572 
573  return 0;
574 }
static void kvm_vgic_unmap_irq(struct vgic_irq *irq)
Definition: vgic.c:507
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_vcpu_pending_irq()

int kvm_vgic_vcpu_pending_irq ( struct kvm_vcpu *  vcpu)

Definition at line 971 of file vgic.c.

972 {
973  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
974  struct vgic_irq *irq;
975  bool pending = false;
976  unsigned long flags;
977  struct vgic_vmcr vmcr;
978 
979  if (!vcpu->kvm->arch.vgic.enabled)
980  return false;
981 
982  if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
983  return true;
984 
985  vgic_get_vmcr(vcpu, &vmcr);
986 
987  raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
988 
989  list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
990  raw_spin_lock(&irq->irq_lock);
991  pending = irq_is_pending(irq) && irq->enabled &&
992  !irq->active &&
993  irq->priority < vmcr.pmr;
994  raw_spin_unlock(&irq->irq_lock);
995 
996  if (pending)
997  break;
998  }
999 
1000  raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
1001 
1002  return pending;
1003 }
u8 priority
Definition: arm_vgic.h:150
bool enabled
Definition: arm_vgic.h:139
void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
Definition: vgic-mmio.c:851
static bool irq_is_pending(struct vgic_irq *irq)
Definition: vgic.h:108
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vgic_vmcr_sync()

void kvm_vgic_vmcr_sync ( struct kvm_vcpu *  vcpu)

Definition at line 960 of file vgic.c.

961 {
962  if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
963  return;
964 
 965  if (kvm_vgic_global_state.type == VGIC_V2)
 966  vgic_v2_vmcr_sync(vcpu);
967  else
968  vgic_v3_vmcr_sync(vcpu);
969 }
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:467
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:740
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_clear_lr()

static void vgic_clear_lr ( struct kvm_vcpu *  vcpu,
int  lr 
)
inlinestatic

Definition at line 754 of file vgic.c.

755 {
 756  if (kvm_vgic_global_state.type == VGIC_V2)
 757  vgic_v2_clear_lr(vcpu, lr);
758  else
759  vgic_v3_clear_lr(vcpu, lr);
760 }
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
Definition: vgic-v2.c:200
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
Definition: vgic-v3.c:189
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_flush_lr_state()

static void vgic_flush_lr_state ( struct kvm_vcpu *  vcpu)
static

Definition at line 797 of file vgic.c.

798 {
799  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
800  struct vgic_irq *irq;
801  int count;
802  bool multi_sgi;
803  u8 prio = 0xff;
804  int i = 0;
805 
806  lockdep_assert_held(&vgic_cpu->ap_list_lock);
807 
808  count = compute_ap_list_depth(vcpu, &multi_sgi);
809  if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
 810  vgic_sort_ap_list(vcpu);
 811 
812  count = 0;
813 
814  list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
815  raw_spin_lock(&irq->irq_lock);
816 
817  /*
818  * If we have multi-SGIs in the pipeline, we need to
819  * guarantee that they are all seen before any IRQ of
820  * lower priority. In that case, we need to filter out
821  * these interrupts by exiting early. This is easy as
822  * the AP list has been sorted already.
823  */
824  if (multi_sgi && irq->priority > prio) {
825  _raw_spin_unlock(&irq->irq_lock);
826  break;
827  }
828 
829  if (likely(vgic_target_oracle(irq) == vcpu)) {
830  vgic_populate_lr(vcpu, irq, count++);
831 
832  if (irq->source)
833  prio = irq->priority;
834  }
835 
836  raw_spin_unlock(&irq->irq_lock);
837 
838  if (count == kvm_vgic_global_state.nr_lr) {
839  if (!list_is_last(&irq->ap_list,
 840  &vgic_cpu->ap_list_head))
 841  vgic_set_underflow(vcpu);
 842  break;
843  }
844  }
845 
846  /* Nuke remaining LRs */
847  for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
848  vgic_clear_lr(vcpu, i);
849 
850  if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
851  vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
852  else
853  vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
854 }
int nr_lr
Definition: arm_vgic.h:62
u8 source
Definition: arm_vgic.h:148
static struct kvm_vcpu * vgic_target_oracle(struct vgic_irq *irq)
Definition: vgic.c:216
static void vgic_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
Definition: vgic.c:743
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
Definition: vgic.c:299
static int compute_ap_list_depth(struct kvm_vcpu *vcpu, bool *multi_sgi)
Definition: vgic.c:771
static void vgic_set_underflow(struct kvm_vcpu *vcpu)
Definition: vgic.c:762
static void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
Definition: vgic.c:754
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_flush_pending_lpis()

void vgic_flush_pending_lpis ( struct kvm_vcpu *  vcpu)

Definition at line 152 of file vgic.c.

153 {
154  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
155  struct vgic_irq *irq, *tmp;
156  unsigned long flags;
157 
158  raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
159 
160  list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
161  if (irq->intid >= VGIC_MIN_LPI) {
162  raw_spin_lock(&irq->irq_lock);
163  list_del(&irq->ap_list);
164  irq->vcpu = NULL;
165  raw_spin_unlock(&irq->irq_lock);
166  vgic_put_irq(vcpu->kvm, irq);
167  }
168  }
169 
170  raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
171 }
#define VGIC_MIN_LPI
Definition: arm_vgic.h:31
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_fold_lr_state()

static void vgic_fold_lr_state ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 734 of file vgic.c.

735 {
 736  if (kvm_vgic_global_state.type == VGIC_V2)
 737  vgic_v2_fold_lr_state(vcpu);
738  else
739  vgic_v3_fold_lr_state(vcpu);
740 }
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:49
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:35
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_irq()

struct vgic_irq* vgic_get_irq ( struct kvm *  kvm,
struct kvm_vcpu *  vcpu,
u32  intid 
)

Definition at line 92 of file vgic.c.

94 {
95  /* SGIs and PPIs */
96  if (intid <= VGIC_MAX_PRIVATE) {
97  intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
98  return &vcpu->arch.vgic_cpu.private_irqs[intid];
99  }
100 
101  /* SPIs */
102  if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
103  intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
104  return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
105  }
106 
107  /* LPIs */
108  if (intid >= VGIC_MIN_LPI)
109  return vgic_get_lpi(kvm, intid);
110 
111  return NULL;
112 }
#define VGIC_MAX_PRIVATE
Definition: arm_vgic.h:28
static struct vgic_irq * vgic_get_lpi(struct kvm *kvm, u32 intid)
Definition: vgic.c:60
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_lpi()

static struct vgic_irq* vgic_get_lpi ( struct kvm *  kvm,
u32  intid 
)
static

Definition at line 60 of file vgic.c.

61 {
62  struct vgic_dist *dist = &kvm->arch.vgic;
63  struct vgic_irq *irq = NULL;
64  unsigned long flags;
65 
66  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
67 
68  list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
69  if (irq->intid != intid)
70  continue;
71 
72  /*
73  * This increases the refcount, the caller is expected to
74  * call vgic_put_irq() later once it's finished with the IRQ.
75  */
76  vgic_get_irq_kref(irq);
77  goto out_unlock;
78  }
79  irq = NULL;
80 
81 out_unlock:
82  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
83 
84  return irq;
85 }
struct list_head lpi_list_head
Definition: arm_vgic.h:278
raw_spinlock_t lpi_list_lock
Definition: arm_vgic.h:277
static void vgic_get_irq_kref(struct vgic_irq *irq)
Definition: vgic.h:223
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_get_phys_line_level()

bool vgic_get_phys_line_level ( struct vgic_irq *  irq)

Definition at line 180 of file vgic.c.

181 {
182  bool line_level;
183 
184  BUG_ON(!irq->hw);
185 
186  if (irq->ops && irq->ops->get_input_level)
187  return irq->ops->get_input_level(irq->intid);
188 
189  WARN_ON(irq_get_irqchip_state(irq->host_irq,
190  IRQCHIP_STATE_PENDING,
191  &line_level));
192  return line_level;
193 }
bool(* get_input_level)(int vintid)
Definition: arm_vgic.h:114
Here is the caller graph for this function:

◆ vgic_irq_cmp()

static int vgic_irq_cmp ( void *  priv,
const struct list_head *  a,
const struct list_head *  b 
)
static

Definition at line 259 of file vgic.c.

261 {
262  struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
263  struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
264  bool penda, pendb;
265  int ret;
266 
267  /*
268  * list_sort may call this function with the same element when
269  * the list is fairly long.
270  */
271  if (unlikely(irqa == irqb))
272  return 0;
273 
274  raw_spin_lock(&irqa->irq_lock);
275  raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
276 
277  if (irqa->active || irqb->active) {
278  ret = (int)irqb->active - (int)irqa->active;
279  goto out;
280  }
281 
282  penda = irqa->enabled && irq_is_pending(irqa);
283  pendb = irqb->enabled && irq_is_pending(irqb);
284 
285  if (!penda || !pendb) {
286  ret = (int)pendb - (int)penda;
287  goto out;
288  }
289 
290  /* Both pending and enabled, sort by priority */
291  ret = irqa->priority - irqb->priority;
292 out:
293  raw_spin_unlock(&irqb->irq_lock);
294  raw_spin_unlock(&irqa->irq_lock);
295  return ret;
296 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_handle_resampling()

void vgic_irq_handle_resampling ( struct vgic_irq *  irq,
bool  lr_deactivated,
bool  lr_pending 
)

Definition at line 1060 of file vgic.c.

1062 {
1063  if (vgic_irq_is_mapped_level(irq)) {
1064  bool resample = false;
1065 
1066  if (unlikely(vgic_irq_needs_resampling(irq))) {
1067  resample = !(irq->active || irq->pending_latch);
1068  } else if (lr_pending || (lr_deactivated && irq->line_level)) {
 1069  irq->line_level = vgic_get_phys_line_level(irq);
 1070  resample = !irq->line_level;
1071  }
1072 
1073  if (resample)
1074  vgic_irq_set_phys_active(irq, false);
1075  }
1076 }
static bool vgic_irq_needs_resampling(struct vgic_irq *irq)
Definition: arm_vgic.h:160
bool vgic_get_phys_line_level(struct vgic_irq *irq)
Definition: vgic.c:180
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
Definition: vgic.c:196
static bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
Definition: vgic.h:116
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_irq_release()

static void vgic_irq_release ( struct kref *  ref)
static

Definition at line 119 of file vgic.c.

120 {
121 }
Here is the caller graph for this function:

◆ vgic_irq_set_phys_active()

void vgic_irq_set_phys_active ( struct vgic_irq *  irq,
bool  active 
)

Definition at line 196 of file vgic.c.

197 {
198 
199  BUG_ON(!irq->hw);
200  WARN_ON(irq_set_irqchip_state(irq->host_irq,
201  IRQCHIP_STATE_ACTIVE,
202  active));
203 }
Here is the caller graph for this function:

◆ vgic_irq_set_phys_pending()

void vgic_irq_set_phys_pending ( struct vgic_irq *  irq,
bool  pending 
)

Definition at line 173 of file vgic.c.

174 {
175  WARN_ON(irq_set_irqchip_state(irq->host_irq,
176  IRQCHIP_STATE_PENDING,
177  pending));
178 }
Here is the caller graph for this function:

◆ vgic_kick_vcpus()

void vgic_kick_vcpus ( struct kvm *  kvm)

Definition at line 1005 of file vgic.c.

1006 {
1007  struct kvm_vcpu *vcpu;
1008  unsigned long c;
1009 
1010  /*
1011  * We've injected an interrupt, time to find out who deserves
1012  * a good kick...
1013  */
1014  kvm_for_each_vcpu(c, vcpu, kvm) {
1015  if (kvm_vgic_vcpu_pending_irq(vcpu)) {
1016  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
1017  kvm_vcpu_kick(vcpu);
1018  }
1019  }
1020 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
Definition: vgic.c:971
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_populate_lr()

static void vgic_populate_lr ( struct kvm_vcpu *  vcpu,
struct vgic_irq *  irq,
int  lr 
)
inlinestatic

Definition at line 743 of file vgic.c.

745 {
746  lockdep_assert_held(&irq->irq_lock);
747 
 748  if (kvm_vgic_global_state.type == VGIC_V2)
 749  vgic_v2_populate_lr(vcpu, irq, lr);
750  else
751  vgic_v3_populate_lr(vcpu, irq, lr);
752 }
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
Definition: vgic-v2.c:122
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
Definition: vgic-v3.c:107
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_prune_ap_list()

static void vgic_prune_ap_list ( struct kvm_vcpu *  vcpu)
static

vgic_prune_ap_list - Remove non-relevant interrupts from the list

@vcpu: The VCPU pointer

Go over the list of "interesting" interrupts, and prune those that we won't have to consider in the near future.

Definition at line 633 of file vgic.c.

634 {
635  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
636  struct vgic_irq *irq, *tmp;
637 
638  DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
639 
640 retry:
641  raw_spin_lock(&vgic_cpu->ap_list_lock);
642 
643  list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
644  struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
645  bool target_vcpu_needs_kick = false;
646 
647  raw_spin_lock(&irq->irq_lock);
648 
649  BUG_ON(vcpu != irq->vcpu);
650 
651  target_vcpu = vgic_target_oracle(irq);
652 
653  if (!target_vcpu) {
654  /*
655  * We don't need to process this interrupt any
656  * further, move it off the list.
657  */
658  list_del(&irq->ap_list);
659  irq->vcpu = NULL;
660  raw_spin_unlock(&irq->irq_lock);
661 
662  /*
663  * This vgic_put_irq call matches the
664  * vgic_get_irq_kref in vgic_queue_irq_unlock,
665  * where we added the LPI to the ap_list. As
666  * we remove the irq from the list, we
667  * also drop the refcount.
668  */
669  vgic_put_irq(vcpu->kvm, irq);
670  continue;
671  }
672 
673  if (target_vcpu == vcpu) {
674  /* We're on the right CPU */
675  raw_spin_unlock(&irq->irq_lock);
676  continue;
677  }
678 
679  /* This interrupt looks like it has to be migrated. */
680 
681  raw_spin_unlock(&irq->irq_lock);
682  raw_spin_unlock(&vgic_cpu->ap_list_lock);
683 
684  /*
685  * Ensure locking order by always locking the smallest
686  * ID first.
687  */
688  if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
689  vcpuA = vcpu;
690  vcpuB = target_vcpu;
691  } else {
692  vcpuA = target_vcpu;
693  vcpuB = vcpu;
694  }
695 
696  raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
697  raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
698  SINGLE_DEPTH_NESTING);
699  raw_spin_lock(&irq->irq_lock);
700 
701  /*
702  * If the affinity has been preserved, move the
703  * interrupt around. Otherwise, it means things have
704  * changed while the interrupt was unlocked, and we
705  * need to replay this.
706  *
707  * In all cases, we cannot trust the list not to have
708  * changed, so we restart from the beginning.
709  */
710  if (target_vcpu == vgic_target_oracle(irq)) {
711  struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
712 
713  list_del(&irq->ap_list);
714  irq->vcpu = target_vcpu;
715  list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
716  target_vcpu_needs_kick = true;
717  }
718 
719  raw_spin_unlock(&irq->irq_lock);
720  raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
721  raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
722 
723  if (target_vcpu_needs_kick) {
724  kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
725  kvm_vcpu_kick(target_vcpu);
726  }
727 
728  goto retry;
729  }
730 
731  raw_spin_unlock(&vgic_cpu->ap_list_lock);
732 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_put_irq()

void vgic_put_irq ( struct kvm *  kvm,
struct vgic_irq irq 
)

Definition at line 139 of file vgic.c.

140 {
141  struct vgic_dist *dist = &kvm->arch.vgic;
142  unsigned long flags;
143 
144  if (irq->intid < VGIC_MIN_LPI)
145  return;
146 
147  raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
148  __vgic_put_lpi_locked(kvm, irq);
149  raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 }
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
Definition: vgic.c:126
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_queue_irq_unlock()

bool vgic_queue_irq_unlock ( struct kvm *  kvm,
struct vgic_irq *  irq,
unsigned long  flags 
)

Definition at line 336 of file vgic.c.

/*
 * Queue @irq on the ap_list of the vcpu chosen by vgic_target_oracle().
 *
 * Called with irq->irq_lock held (interrupts disabled, @flags from the
 * caller's irqsave); the lock is always dropped before returning.
 * Returns true if the IRQ was newly queued, false if it was already on
 * an ap_list or does not need to be injected.
 */
338 {
339  struct kvm_vcpu *vcpu;
340 
341  lockdep_assert_held(&irq->irq_lock);
342 
343 retry:
344  vcpu = vgic_target_oracle(irq);
345  if (irq->vcpu || !vcpu) {
346  /*
347  * If this IRQ is already on a VCPU's ap_list, then it
348  * cannot be moved or modified and there is no more work for
349  * us to do.
350  *
351  * Otherwise, if the irq is not pending and enabled, it does
352  * not need to be inserted into an ap_list and there is also
353  * no more work for us to do.
354  */
355  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
356 
357  /*
358  * We have to kick the VCPU here, because we could be
359  * queueing an edge-triggered interrupt for which we
360  * get no EOI maintenance interrupt. In that case,
361  * while the IRQ is already on the VCPU's AP list, the
362  * VCPU could have EOI'ed the original interrupt and
363  * won't see this one until it exits for some other
364  * reason.
365  */
366  if (vcpu) {
367  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
368  kvm_vcpu_kick(vcpu);
369  }
370  return false;
371  }
372 
373  /*
374  * We must unlock the irq lock to take the ap_list_lock where
375  * we are going to insert this new pending interrupt.
376  */
377  raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
378 
379  /* someone can do stuff here, which we re-check below */
380 
381  raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
382  raw_spin_lock(&irq->irq_lock);
383 
384  /*
385  * Did something change behind our backs?
386  *
387  * There are two cases:
388  * 1) The irq lost its pending state or was disabled behind our
389  * backs and/or it was queued to another VCPU's ap_list.
390  * 2) Someone changed the affinity on this irq behind our
391  * backs and we are now holding the wrong ap_list_lock.
392  *
393  * In both cases, drop the locks and retry.
394  */
395 
396  if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
397  raw_spin_unlock(&irq->irq_lock);
398  raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
399  flags);
400 
401  raw_spin_lock_irqsave(&irq->irq_lock, flags);
402  goto retry;
403  }
404 
405  /*
406  * Grab a reference to the irq to reflect the fact that it is
407  * now in the ap_list.
408  */
409  vgic_get_irq_kref(irq);
410  list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
411  irq->vcpu = vcpu;
412 
413  raw_spin_unlock(&irq->irq_lock);
414  raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
415 
416  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
417  kvm_vcpu_kick(vcpu);
418 
419  return true;
420 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_restore_state()

static void vgic_restore_state ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 896 of file vgic.c.

897 {
898  if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
899  vgic_v2_restore_state(vcpu);
900  else
901  __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
902 }
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:438
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:234
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_save_state()

static void vgic_save_state ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 866 of file vgic.c.

867 {
868  if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
869  vgic_v2_save_state(vcpu);
870  else
871  __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
872 }
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:424
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
Definition: vgic-v3-sr.c:199
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_set_underflow()

static void vgic_set_underflow ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 762 of file vgic.c.

763 {
765  vgic_v2_set_underflow(vcpu);
766  else
767  vgic_v3_set_underflow(vcpu);
768 }
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
Definition: vgic-v2.c:29
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
Definition: vgic-v3.c:22
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_sort_ap_list()

static void vgic_sort_ap_list ( struct kvm_vcpu *  vcpu)
static

Definition at line 299 of file vgic.c.

300 {
301  struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
302 
303  lockdep_assert_held(&vgic_cpu->ap_list_lock);
304 
305  list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
306 }
static int vgic_irq_cmp(void *priv, const struct list_head *a, const struct list_head *b)
Definition: vgic.c:259
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_target_oracle()

static struct kvm_vcpu* vgic_target_oracle ( struct vgic_irq irq)
static

kvm_vgic_target_oracle - compute the target vcpu for an irq

@irq: The irq to route. Must be already locked.

Based on the current state of the interrupt (enabled, pending, active, vcpu and target_vcpu), compute the next vcpu this should be given to. Return NULL if this shouldn't be injected at all.

Requires the IRQ lock to be held.

Definition at line 216 of file vgic.c.

217 {
218  lockdep_assert_held(&irq->irq_lock);
219 
220  /* If the interrupt is active, it must stay on the current vcpu */
221  if (irq->active)
222  return irq->vcpu ? : irq->target_vcpu;
223 
224  /*
225  * If the IRQ is not active but enabled and pending, we should direct
226  * it to its configured target VCPU.
227  * If the distributor is disabled, pending interrupts shouldn't be
228  * forwarded.
229  */
230  if (irq->enabled && irq_is_pending(irq)) {
231  if (unlikely(irq->target_vcpu &&
232  !irq->target_vcpu->kvm->arch.vgic.enabled))
233  return NULL;
234 
235  return irq->target_vcpu;
236  }
237 
238  /* If neither active nor pending and enabled, then this IRQ should not
239  * be queued to any VCPU.
240  */
241  return NULL;
242 }
struct kvm_vcpu * target_vcpu
Definition: arm_vgic.h:127
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vgic_validate_injection()

/*
 * Decide whether an injection request should actually change state.
 *
 * Rejects requests from anyone but the registered owner.  For level
 * interrupts, only a change of the line level matters; for edge
 * interrupts, only level == true triggers an injection.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	if (irq->config == VGIC_CONFIG_LEVEL)
		return irq->line_level != level;

	if (irq->config == VGIC_CONFIG_EDGE)
		return level;

	return false;
}
@ VGIC_CONFIG_EDGE
Definition: arm_vgic.h:93
Here is the caller graph for this function:

Variable Documentation

◆ __ro_after_init

struct vgic_global kvm_vgic_global_state __ro_after_init
Initial value:
= {
.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
}

Definition at line 1 of file vgic.c.