KVM
Macros | Functions
pmu-emul.c File Reference
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
#include <asm/arm_pmuv3.h>

Go to the source code of this file.

Macros

#define PERF_ATTR_CFG1_COUNTER_64BIT   BIT(0)
 

Functions

 DEFINE_STATIC_KEY_FALSE (kvm_arm_pmu_available)
 
static LIST_HEAD (arm_pmus)
 
static DEFINE_MUTEX (arm_pmus_lock)
 
static void kvm_pmu_create_perf_event (struct kvm_pmc *pmc)
 
static void kvm_pmu_release_perf_event (struct kvm_pmc *pmc)
 
static struct kvm_vcpu * kvm_pmc_to_vcpu (const struct kvm_pmc *pmc)
 
static struct kvm_pmc * kvm_vcpu_idx_to_pmc (struct kvm_vcpu *vcpu, int cnt_idx)
 
static u32 __kvm_pmu_event_mask (unsigned int pmuver)
 
static u32 kvm_pmu_event_mask (struct kvm *kvm)
 
u64 kvm_pmu_evtyper_mask (struct kvm *kvm)
 
static bool kvm_pmc_is_64bit (struct kvm_pmc *pmc)
 
static bool kvm_pmc_has_64bit_overflow (struct kvm_pmc *pmc)
 
static bool kvm_pmu_counter_can_chain (struct kvm_pmc *pmc)
 
static u32 counter_index_to_reg (u64 idx)
 
static u32 counter_index_to_evtreg (u64 idx)
 
static u64 kvm_pmu_get_pmc_value (struct kvm_pmc *pmc)
 
u64 kvm_pmu_get_counter_value (struct kvm_vcpu *vcpu, u64 select_idx)
 
static void kvm_pmu_set_pmc_value (struct kvm_pmc *pmc, u64 val, bool force)
 
void kvm_pmu_set_counter_value (struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 
static void kvm_pmu_stop_counter (struct kvm_pmc *pmc)
 
void kvm_pmu_vcpu_init (struct kvm_vcpu *vcpu)
 
void kvm_pmu_vcpu_reset (struct kvm_vcpu *vcpu)
 
void kvm_pmu_vcpu_destroy (struct kvm_vcpu *vcpu)
 
u64 kvm_pmu_valid_counter_mask (struct kvm_vcpu *vcpu)
 
void kvm_pmu_enable_counter_mask (struct kvm_vcpu *vcpu, u64 val)
 
void kvm_pmu_disable_counter_mask (struct kvm_vcpu *vcpu, u64 val)
 
static u64 kvm_pmu_overflow_status (struct kvm_vcpu *vcpu)
 
static void kvm_pmu_update_state (struct kvm_vcpu *vcpu)
 
bool kvm_pmu_should_notify_user (struct kvm_vcpu *vcpu)
 
void kvm_pmu_update_run (struct kvm_vcpu *vcpu)
 
void kvm_pmu_flush_hwstate (struct kvm_vcpu *vcpu)
 
void kvm_pmu_sync_hwstate (struct kvm_vcpu *vcpu)
 
static void kvm_pmu_perf_overflow_notify_vcpu (struct irq_work *work)
 
static void kvm_pmu_counter_increment (struct kvm_vcpu *vcpu, unsigned long mask, u32 event)
 
static u64 compute_period (struct kvm_pmc *pmc, u64 counter)
 
static void kvm_pmu_perf_overflow (struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs)
 
void kvm_pmu_software_increment (struct kvm_vcpu *vcpu, u64 val)
 
void kvm_pmu_handle_pmcr (struct kvm_vcpu *vcpu, u64 val)
 
static bool kvm_pmu_counter_is_enabled (struct kvm_pmc *pmc)
 
void kvm_pmu_set_counter_event_type (struct kvm_vcpu *vcpu, u64 data, u64 select_idx)
 
void kvm_host_pmu_init (struct arm_pmu *pmu)
 
static struct arm_pmu * kvm_pmu_probe_armpmu (void)
 
u64 kvm_pmu_get_pmceid (struct kvm_vcpu *vcpu, bool pmceid1)
 
void kvm_vcpu_reload_pmu (struct kvm_vcpu *vcpu)
 
int kvm_arm_pmu_v3_enable (struct kvm_vcpu *vcpu)
 
static int kvm_arm_pmu_v3_init (struct kvm_vcpu *vcpu)
 
static bool pmu_irq_is_valid (struct kvm *kvm, int irq)
 
u8 kvm_arm_pmu_get_max_counters (struct kvm *kvm)
 
static void kvm_arm_set_pmu (struct kvm *kvm, struct arm_pmu *arm_pmu)
 
int kvm_arm_set_default_pmu (struct kvm *kvm)
 
static int kvm_arm_pmu_v3_set_pmu (struct kvm_vcpu *vcpu, int pmu_id)
 
int kvm_arm_pmu_v3_set_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
int kvm_arm_pmu_v3_get_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
int kvm_arm_pmu_v3_has_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
u8 kvm_arm_pmu_get_pmuver_limit (void)
 
u64 kvm_vcpu_read_pmcr (struct kvm_vcpu *vcpu)
 

Macro Definition Documentation

◆ PERF_ATTR_CFG1_COUNTER_64BIT

#define PERF_ATTR_CFG1_COUNTER_64BIT   BIT(0)

Definition at line 19 of file pmu-emul.c.

Function Documentation

◆ __kvm_pmu_event_mask()

static u32 __kvm_pmu_event_mask ( unsigned int  pmuver)
static

Definition at line 39 of file pmu-emul.c.

40 {
41  switch (pmuver) {
42  case ID_AA64DFR0_EL1_PMUVer_IMP:
43  return GENMASK(9, 0);
44  case ID_AA64DFR0_EL1_PMUVer_V3P1:
45  case ID_AA64DFR0_EL1_PMUVer_V3P4:
46  case ID_AA64DFR0_EL1_PMUVer_V3P5:
47  case ID_AA64DFR0_EL1_PMUVer_V3P7:
48  return GENMASK(15, 0);
49  default: /* Shouldn't be here, just for sanity */
50  WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
51  return 0;
52  }
53 }
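In practice, a baseline PMUv3 implementation therefore encodes event numbers in 10 bits (events 0x0-0x3FF), while PMUv3.1 and later use 16 bits (events 0x0-0xFFFF). kvm_arm_pmu_v3_set_attr() relies on this width when sizing the event filter bitmap (nr_events = __kvm_pmu_event_mask(pmuver) + 1).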

◆ compute_period()

static u64 compute_period ( struct kvm_pmc *  pmc,
u64  counter 
)
static

Definition at line 481 of file pmu-emul.c.

482 {
483  u64 val;
484 
485  if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc))
486  val = (-counter) & GENMASK(63, 0);
487  else
488  val = (-counter) & GENMASK(31, 0);
489 
490  return val;
491 }
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
Definition: pmu-emul.c:84
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
Definition: pmu-emul.c:90
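The period is the two's complement of the current counter value, truncated to the counter's overflow width. A minimal standalone sketch of the same arithmetic (a hypothetical userspace helper, not part of pmu-emul.c):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Events remaining until the counter overflows: the negated counter,
 * truncated to 64 or 32 bits depending on the overflow width. */
static uint64_t sample_period(uint64_t counter, int overflow_is_64bit)
{
	if (overflow_is_64bit)
		return -counter;		/* GENMASK(63, 0) */
	return (uint32_t)-counter;	/* GENMASK(31, 0) */
}

int main(void)
{
	/* A 32-bit counter at 0xfffffff0 overflows after 0x10 events. */
	printf("%#" PRIx64 "\n", sample_period(0xfffffff0, 0));
	/* With 64-bit overflow, a counter at 0x10 runs for nearly 2^64. */
	printf("%#" PRIx64 "\n", sample_period(0x10, 1));
	return 0;
}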

◆ counter_index_to_evtreg()

static u32 counter_index_to_evtreg ( u64  idx)
static

Definition at line 109 of file pmu-emul.c.

110 {
111  return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
112 }
#define ARMV8_PMU_CYCLE_IDX
Definition: arm_pmu.h:13

◆ counter_index_to_reg()

static u32 counter_index_to_reg ( u64  idx)
static

Definition at line 104 of file pmu-emul.c.

105 {
106  return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
107 }

◆ DEFINE_MUTEX()

static DEFINE_MUTEX ( arm_pmus_lock  )
static

◆ DEFINE_STATIC_KEY_FALSE()

DEFINE_STATIC_KEY_FALSE ( kvm_arm_pmu_available  )

◆ kvm_arm_pmu_get_max_counters()

u8 kvm_arm_pmu_get_max_counters ( struct kvm *  kvm)

kvm_arm_pmu_get_max_counters - Return the max number of PMU counters. @kvm: The kvm pointer

Definition at line 908 of file pmu-emul.c.

909 {
910  struct arm_pmu *arm_pmu = kvm->arch.arm_pmu;
911 
912  /*
913  * The arm_pmu->num_events considers the cycle counter as well.
914  * Ignore that and return only the general-purpose counters.
915  */
916  return arm_pmu->num_events - 1;
917 }
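For example, an arm_pmu instance whose num_events is 7 (six programmable event counters plus the cycle counter) yields 6 here, which kvm_arm_set_pmu() then exposes to the guest as PMCR_EL0.N.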

◆ kvm_arm_pmu_get_pmuver_limit()

u8 kvm_arm_pmu_get_pmuver_limit ( void  )

Definition at line 1121 of file pmu-emul.c.

1122 {
1123  u64 tmp;
1124 
1125  tmp = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1126  tmp = cpuid_feature_cap_perfmon_field(tmp,
1127  ID_AA64DFR0_EL1_PMUVer_SHIFT,
1128  ID_AA64DFR0_EL1_PMUVer_V3P5);
1129  return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp);
1130 }

◆ kvm_arm_pmu_v3_enable()

int kvm_arm_pmu_v3_enable ( struct kvm_vcpu *  vcpu)

Definition at line 816 of file pmu-emul.c.

817 {
818  if (!kvm_vcpu_has_pmu(vcpu))
819  return 0;
820 
821  if (!vcpu->arch.pmu.created)
822  return -EINVAL;
823 
824  /*
825  * A valid interrupt configuration for the PMU is either to have a
826  * properly configured interrupt number and using an in-kernel
827  * irqchip, or to not have an in-kernel GIC and not set an IRQ.
828  */
829  if (irqchip_in_kernel(vcpu->kvm)) {
830  int irq = vcpu->arch.pmu.irq_num;
831  /*
832  * If we are using an in-kernel vgic, at this point we know
833  * the vgic will be initialized, so we can check the PMU irq
834  * number against the dimensions of the vgic and make sure
835  * it's valid.
836  */
837  if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
838  return -EINVAL;
839  } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
840  return -EINVAL;
841  }
842 
843  /* One-off reload of the PMU on first run */
844  kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
845 
846  return 0;
847 }
#define kvm_arm_pmu_irq_initialized(v)
Definition: arm_pmu.h:118
#define kvm_vcpu_has_pmu(vcpu)
Definition: arm_pmu.h:170
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392
#define irq_is_ppi(irq)
Definition: arm_vgic.h:34
#define vgic_valid_spi(k, i)
Definition: arm_vgic.h:395

◆ kvm_arm_pmu_v3_get_attr()

int kvm_arm_pmu_v3_get_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 1083 of file pmu-emul.c.

1084 {
1085  switch (attr->attr) {
1086  case KVM_ARM_VCPU_PMU_V3_IRQ: {
1087  int __user *uaddr = (int __user *)(long)attr->addr;
1088  int irq;
1089 
1090  if (!irqchip_in_kernel(vcpu->kvm))
1091  return -EINVAL;
1092 
1093  if (!kvm_vcpu_has_pmu(vcpu))
1094  return -ENODEV;
1095 
1096  if (!kvm_arm_pmu_irq_initialized(vcpu))
1097  return -ENXIO;
1098 
1099  irq = vcpu->arch.pmu.irq_num;
1100  return put_user(irq, uaddr);
1101  }
1102  }
1103 
1104  return -ENXIO;
1105 }
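A minimal userspace sketch of the corresponding caller (arm64-only; the vcpu_fd plumbing is assumed and error handling is reduced to the essentials):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Read back the PMU overflow interrupt configured for a vCPU using
 * KVM_GET_DEVICE_ATTR on the vCPU file descriptor. */
static int read_pmu_irq(int vcpu_fd)
{
	int irq;
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&irq,
	};

	if (ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;

	printf("PMU overflow IRQ: %d\n", irq);
	return irq;
}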

◆ kvm_arm_pmu_v3_has_attr()

int kvm_arm_pmu_v3_has_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 1107 of file pmu-emul.c.

1108 {
1109  switch (attr->attr) {
1110  case KVM_ARM_VCPU_PMU_V3_IRQ:
1111  case KVM_ARM_VCPU_PMU_V3_INIT:
1112  case KVM_ARM_VCPU_PMU_V3_FILTER:
1113  case KVM_ARM_VCPU_PMU_V3_SET_PMU:
1114  if (kvm_vcpu_has_pmu(vcpu))
1115  return 0;
1116  }
1117 
1118  return -ENXIO;
1119 }

◆ kvm_arm_pmu_v3_init()

static int kvm_arm_pmu_v3_init ( struct kvm_vcpu *  vcpu)
static

Definition at line 849 of file pmu-emul.c.

850 {
851  if (irqchip_in_kernel(vcpu->kvm)) {
852  int ret;
853 
854  /*
855  * If using the PMU with an in-kernel virtual GIC
856  * implementation, we require the GIC to be already
857  * initialized when initializing the PMU.
858  */
859  if (!vgic_initialized(vcpu->kvm))
860  return -ENODEV;
861 
862  if (!kvm_arm_pmu_irq_initialized(vcpu))
863  return -ENXIO;
864 
865  ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
866  &vcpu->arch.pmu);
867  if (ret)
868  return ret;
869  }
870 
871  init_irq_work(&vcpu->arch.pmu.overflow_work,
872  kvm_pmu_perf_overflow_notify_vcpu);
873 
874  vcpu->arch.pmu.created = true;
875  return 0;
876 }
#define vgic_initialized(k)
Definition: arm_vgic.h:393
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
Definition: pmu-emul.c:427
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
Definition: vgic.c:601

◆ kvm_arm_pmu_v3_set_attr()

int kvm_arm_pmu_v3_set_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 980 of file pmu-emul.c.

981 {
982  struct kvm *kvm = vcpu->kvm;
983 
984  lockdep_assert_held(&kvm->arch.config_lock);
985 
986  if (!kvm_vcpu_has_pmu(vcpu))
987  return -ENODEV;
988 
989  if (vcpu->arch.pmu.created)
990  return -EBUSY;
991 
992  switch (attr->attr) {
993  case KVM_ARM_VCPU_PMU_V3_IRQ: {
994  int __user *uaddr = (int __user *)(long)attr->addr;
995  int irq;
996 
997  if (!irqchip_in_kernel(kvm))
998  return -EINVAL;
999 
1000  if (get_user(irq, uaddr))
1001  return -EFAULT;
1002 
1003  /* The PMU overflow interrupt can be a PPI or a valid SPI. */
1004  if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
1005  return -EINVAL;
1006 
1007  if (!pmu_irq_is_valid(kvm, irq))
1008  return -EINVAL;
1009 
1010  if (kvm_arm_pmu_irq_initialized(vcpu))
1011  return -EBUSY;
1012 
1013  kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
1014  vcpu->arch.pmu.irq_num = irq;
1015  return 0;
1016  }
1017  case KVM_ARM_VCPU_PMU_V3_FILTER: {
1018  u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
1019  struct kvm_pmu_event_filter __user *uaddr;
1020  struct kvm_pmu_event_filter filter;
1021  int nr_events;
1022 
1023  /*
1024  * Allow userspace to specify an event filter for the entire
1025  * event range supported by PMUVer of the hardware, rather
1026  * than the guest's PMUVer for KVM backward compatibility.
1027  */
1028  nr_events = __kvm_pmu_event_mask(pmuver) + 1;
1029 
1030  uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
1031 
1032  if (copy_from_user(&filter, uaddr, sizeof(filter)))
1033  return -EFAULT;
1034 
1035  if (((u32)filter.base_event + filter.nevents) > nr_events ||
1036  (filter.action != KVM_PMU_EVENT_ALLOW &&
1037  filter.action != KVM_PMU_EVENT_DENY))
1038  return -EINVAL;
1039 
1040  if (kvm_vm_has_ran_once(kvm))
1041  return -EBUSY;
1042 
1043  if (!kvm->arch.pmu_filter) {
1044  kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
1045  if (!kvm->arch.pmu_filter)
1046  return -ENOMEM;
1047 
1048  /*
1049  * The default depends on the first applied filter.
1050  * If it allows events, the default is to deny.
1051  * Conversely, if the first filter denies a set of
1052  * events, the default is to allow.
1053  */
1054  if (filter.action == KVM_PMU_EVENT_ALLOW)
1055  bitmap_zero(kvm->arch.pmu_filter, nr_events);
1056  else
1057  bitmap_fill(kvm->arch.pmu_filter, nr_events);
1058  }
1059 
1060  if (filter.action == KVM_PMU_EVENT_ALLOW)
1061  bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1062  else
1063  bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
1064 
1065  return 0;
1066  }
1067  case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
1068  int __user *uaddr = (int __user *)(long)attr->addr;
1069  int pmu_id;
1070 
1071  if (get_user(pmu_id, uaddr))
1072  return -EFAULT;
1073 
1074  return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
1075  }
1076  case KVM_ARM_VCPU_PMU_V3_INIT:
1077  return kvm_arm_pmu_v3_init(vcpu);
1078  }
1079 
1080  return -ENXIO;
1081 }
#define irq_is_spi(irq)
Definition: arm_vgic.h:35
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
Definition: pmu-emul.c:950
u8 kvm_arm_pmu_get_pmuver_limit(void)
Definition: pmu-emul.c:1121
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:849
static u32 __kvm_pmu_event_mask(unsigned int pmuver)
Definition: pmu-emul.c:39
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
Definition: pmu-emul.c:883
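A minimal userspace sketch tying the three attributes together (arm64-only; the PPI number 23 and the filter range are illustrative assumptions, and error handling is reduced to the essentials). Note the ordering visible in the code above: every attribute is rejected with -EBUSY once vcpu->arch.pmu.created is set, so the IRQ and filter must be configured before KVM_ARM_VCPU_PMU_V3_INIT:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Configure the vPMU before the VM first runs: set the overflow PPI,
 * install an event filter, then finalize with the INIT attribute. */
static int setup_vcpu_pmu(int vcpu_fd)
{
	int irq = 23;			/* PPI: must match on every vCPU */
	struct kvm_pmu_event_filter filter = {
		.base_event = 0x0,
		.nevents    = 0x40,
		.action     = KVM_PMU_EVENT_ALLOW,	/* allow 0x0-0x3f only */
	};
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
		.attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
		.addr  = (__u64)(unsigned long)&irq,
	};

	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	attr.attr = KVM_ARM_VCPU_PMU_V3_FILTER;
	attr.addr = (__u64)(unsigned long)&filter;
	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
		return -1;

	attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
	attr.addr = 0;
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}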

◆ kvm_arm_pmu_v3_set_pmu()

static int kvm_arm_pmu_v3_set_pmu ( struct kvm_vcpu *  vcpu,
int  pmu_id 
)
static

Definition at line 950 of file pmu-emul.c.

951 {
952  struct kvm *kvm = vcpu->kvm;
953  struct arm_pmu_entry *entry;
954  struct arm_pmu *arm_pmu;
955  int ret = -ENXIO;
956 
957  lockdep_assert_held(&kvm->arch.config_lock);
958  mutex_lock(&arm_pmus_lock);
959 
960  list_for_each_entry(entry, &arm_pmus, entry) {
961  arm_pmu = entry->arm_pmu;
962  if (arm_pmu->pmu.type == pmu_id) {
963  if (kvm_vm_has_ran_once(kvm) ||
964  (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
965  ret = -EBUSY;
966  break;
967  }
968 
969  kvm_arm_set_pmu(kvm, arm_pmu);
970  cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
971  ret = 0;
972  break;
973  }
974  }
975 
976  mutex_unlock(&arm_pmus_lock);
977  return ret;
978 }
static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu)
Definition: pmu-emul.c:919

◆ kvm_arm_set_default_pmu()

int kvm_arm_set_default_pmu ( struct kvm *  kvm)

kvm_arm_set_default_pmu - No PMU set, get the default one. @kvm: The kvm pointer

The observant among you will notice that the supported_cpus mask does not get updated for the default PMU even though it is quite possible the selected instance supports only a subset of cores in the system. This is intentional, and upholds the preexisting behavior on heterogeneous systems where vCPUs can be scheduled on any core but the guest counters could stop working.

Definition at line 939 of file pmu-emul.c.

940 {
941  struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu();
942 
943  if (!arm_pmu)
944  return -ENODEV;
945 
946  kvm_arm_set_pmu(kvm, arm_pmu);
947  return 0;
948 }
static struct arm_pmu * kvm_pmu_probe_armpmu(void)
Definition: pmu-emul.c:720

◆ kvm_arm_set_pmu()

static void kvm_arm_set_pmu ( struct kvm *  kvm,
struct arm_pmu *  arm_pmu 
)
static

Definition at line 919 of file pmu-emul.c.

920 {
921  lockdep_assert_held(&kvm->arch.config_lock);
922 
923  kvm->arch.arm_pmu = arm_pmu;
924  kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm);
925 }
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
Definition: pmu-emul.c:908

◆ kvm_host_pmu_init()

void kvm_host_pmu_init ( struct arm_pmu *  pmu)

Definition at line 693 of file pmu-emul.c.

694 {
695  struct arm_pmu_entry *entry;
696 
697  /*
698  * Check the sanitised PMU version for the system, as KVM does not
699  * support implementations where PMUv3 exists on a subset of CPUs.
700  */
701  if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
702  return;
703 
704  mutex_lock(&arm_pmus_lock);
705 
706  entry = kmalloc(sizeof(*entry), GFP_KERNEL);
707  if (!entry)
708  goto out_unlock;
709 
710  entry->arm_pmu = pmu;
711  list_add_tail(&entry->entry, &arm_pmus);
712 
713  if (list_is_singular(&arm_pmus))
714  static_branch_enable(&kvm_arm_pmu_available);
715 
716 out_unlock:
717  mutex_unlock(&arm_pmus_lock);
718 }

◆ kvm_pmc_has_64bit_overflow()

static bool kvm_pmc_has_64bit_overflow ( struct kvm_pmc *  pmc)
static

Definition at line 90 of file pmu-emul.c.

91 {
92  u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc));
93 
94  return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
95  (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
96 }
static struct kvm_vcpu * kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
Definition: pmu-emul.c:29
u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:1136

◆ kvm_pmc_is_64bit()

static bool kvm_pmc_is_64bit ( struct kvm_pmc *  pmc)
static

kvm_pmc_is_64bit - determine if counter is 64bit @pmc: counter context

Definition at line 84 of file pmu-emul.c.

85 {
86  return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
87  kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
88 }
#define kvm_pmu_is_3p5(vcpu)
Definition: arm_pmu.h:171

◆ kvm_pmc_to_vcpu()

static struct kvm_vcpu* kvm_pmc_to_vcpu ( const struct kvm_pmc *  pmc)
static

Definition at line 29 of file pmu-emul.c.

30 {
31  return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
32 }

◆ kvm_pmu_counter_can_chain()

static bool kvm_pmu_counter_can_chain ( struct kvm_pmc *  pmc)
static

Definition at line 98 of file pmu-emul.c.

99 {
100  return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX &&
101  !kvm_pmc_has_64bit_overflow(pmc));
102 }

◆ kvm_pmu_counter_increment()

static void kvm_pmu_counter_increment ( struct kvm_vcpu *  vcpu,
unsigned long  mask,
u32  event 
)
static

Definition at line 440 of file pmu-emul.c.

442 {
443  int i;
444 
445  if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
446  return;
447 
448  /* Weed out disabled counters */
449  mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
450 
451  for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
452  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
453  u64 type, reg;
454 
455  /* Filter on event type */
456  type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
457  type &= kvm_pmu_event_mask(vcpu->kvm);
458  if (type != event)
459  continue;
460 
461  /* Increment this counter */
462  reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
463  if (!kvm_pmc_is_64bit(pmc))
464  reg = lower_32_bits(reg);
465  __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;
466 
467  /* No overflow? move on */
468  if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg))
469  continue;
470 
471  /* Mark overflow */
472  __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
473 
474  if (kvm_pmu_counter_can_chain(pmc))
475  kvm_pmu_counter_increment(vcpu, BIT(i + 1),
476  ARMV8_PMUV3_PERFCTR_CHAIN);
477  }
478 }
static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc)
Definition: pmu-emul.c:98
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, unsigned long mask, u32 event)
Definition: pmu-emul.c:440
static u32 counter_index_to_reg(u64 idx)
Definition: pmu-emul.c:104
static u32 kvm_pmu_event_mask(struct kvm *kvm)
Definition: pmu-emul.c:55
static u32 counter_index_to_evtreg(u64 idx)
Definition: pmu-emul.c:109
static struct kvm_pmc * kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
Definition: pmu-emul.c:34
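For example, when an even-numbered 32-bit counter wraps while its odd-numbered neighbour is programmed with the CHAIN event, the overflow path above recursively calls kvm_pmu_counter_increment(vcpu, BIT(i + 1), ARMV8_PMUV3_PERFCTR_CHAIN), so the pair behaves like a single 64-bit counter.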

◆ kvm_pmu_counter_is_enabled()

static bool kvm_pmu_counter_is_enabled ( struct kvm_pmc *  pmc)
static

Definition at line 585 of file pmu-emul.c.

586 {
587  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
588  return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) &&
589  (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx));
590 }

◆ kvm_pmu_create_perf_event()

static void kvm_pmu_create_perf_event ( struct kvm_pmc *  pmc)
static

kvm_pmu_create_perf_event - create a perf event for a counter @pmc: Counter context

Definition at line 596 of file pmu-emul.c.

597 {
598  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
599  struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
600  struct perf_event *event;
601  struct perf_event_attr attr;
602  u64 eventsel, reg, data;
603  bool p, u, nsk, nsu;
604 
605  reg = counter_index_to_evtreg(pmc->idx);
606  data = __vcpu_sys_reg(vcpu, reg);
607 
608  kvm_pmu_stop_counter(pmc);
609  if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
610  eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
611  else
612  eventsel = data & kvm_pmu_event_mask(vcpu->kvm);
613 
614  /*
615  * Neither SW increment nor chained events need to be backed
616  * by a perf event.
617  */
618  if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
619  eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
620  return;
621 
622  /*
623  * If we have a filter in place and that the event isn't allowed, do
624  * not install a perf event either.
625  */
626  if (vcpu->kvm->arch.pmu_filter &&
627  !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
628  return;
629 
630  p = data & ARMV8_PMU_EXCLUDE_EL1;
631  u = data & ARMV8_PMU_EXCLUDE_EL0;
632  nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1;
633  nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0;
634 
635  memset(&attr, 0, sizeof(struct perf_event_attr));
636  attr.type = arm_pmu->pmu.type;
637  attr.size = sizeof(attr);
638  attr.pinned = 1;
639  attr.disabled = !kvm_pmu_counter_is_enabled(pmc);
640  attr.exclude_user = (u != nsu);
641  attr.exclude_kernel = (p != nsk);
642  attr.exclude_hv = 1; /* Don't count EL2 events */
643  attr.exclude_host = 1; /* Don't count host events */
644  attr.config = eventsel;
645 
646  /*
647  * If counting with a 64bit counter, advertise it to the perf
648  * code, carefully dealing with the initial sample period
649  * which also depends on the overflow.
650  */
651  if (kvm_pmc_is_64bit(pmc))
652  attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
653 
654  attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc));
655 
656  event = perf_event_create_kernel_counter(&attr, -1, current,
657  kvm_pmu_perf_overflow, pmc);
658 
659  if (IS_ERR(event)) {
660  pr_err_once("kvm: pmu event creation failed %ld\n",
661  PTR_ERR(event));
662  return;
663  }
664 
665  pmc->perf_event = event;
666 }
static void kvm_pmu_perf_overflow(struct perf_event *perf_event, struct perf_sample_data *data, struct pt_regs *regs)
Definition: pmu-emul.c:496
static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
Definition: pmu-emul.c:481
static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
Definition: pmu-emul.c:585
static void kvm_pmu_stop_counter(struct kvm_pmc *pmc)
Definition: pmu-emul.c:209
static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc)
Definition: pmu-emul.c:114
#define PERF_ATTR_CFG1_COUNTER_64BIT
Definition: pmu-emul.c:19

◆ kvm_pmu_disable_counter_mask()

void kvm_pmu_disable_counter_mask ( struct kvm_vcpu *  vcpu,
u64  val 
)

kvm_pmu_disable_counter_mask - disable selected PMU counters @vcpu: The vcpu pointer @val: the value the guest writes to the PMCNTENCLR register

Call perf_event_disable to stop counting the perf event

Definition at line 319 of file pmu-emul.c.

320 {
321  int i;
322 
323  if (!kvm_vcpu_has_pmu(vcpu) || !val)
324  return;
325 
326  for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
327  struct kvm_pmc *pmc;
328 
329  if (!(val & BIT(i)))
330  continue;
331 
332  pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
333 
334  if (pmc->perf_event)
335  perf_event_disable(pmc->perf_event);
336  }
337 }

◆ kvm_pmu_enable_counter_mask()

void kvm_pmu_enable_counter_mask ( struct kvm_vcpu *  vcpu,
u64  val 
)

kvm_pmu_enable_counter_mask - enable selected PMU counters @vcpu: The vcpu pointer @val: the value the guest writes to the PMCNTENSET register

Call perf_event_enable to start counting the perf event

Definition at line 285 of file pmu-emul.c.

286 {
287  int i;
288  if (!kvm_vcpu_has_pmu(vcpu))
289  return;
290 
291  if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
292  return;
293 
294  for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
295  struct kvm_pmc *pmc;
296 
297  if (!(val & BIT(i)))
298  continue;
299 
300  pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
301 
302  if (!pmc->perf_event) {
303  kvm_pmu_create_perf_event(pmc);
304  } else {
305  perf_event_enable(pmc->perf_event);
306  if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
307  kvm_debug("fail to enable perf event\n");
308  }
309  }
310 }
static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc)
Definition: pmu-emul.c:596

◆ kvm_pmu_event_mask()

static u32 kvm_pmu_event_mask ( struct kvm *  kvm)
static

Definition at line 55 of file pmu-emul.c.

56 {
57  u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
58  u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
59 
60  return __kvm_pmu_event_mask(pmuver);
61 }

◆ kvm_pmu_evtyper_mask()

u64 kvm_pmu_evtyper_mask ( struct kvm *  kvm)

Definition at line 63 of file pmu-emul.c.

64 {
65  u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
66  kvm_pmu_event_mask(kvm);
67  u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
68 
69  if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
70  mask |= ARMV8_PMU_INCLUDE_EL2;
71 
72  if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
73  mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
74  ARMV8_PMU_EXCLUDE_NS_EL1 |
75  ARMV8_PMU_EXCLUDE_EL3;
76 
77  return mask;
78 }

◆ kvm_pmu_flush_hwstate()

void kvm_pmu_flush_hwstate ( struct kvm_vcpu *  vcpu)

kvm_pmu_flush_hwstate - flush pmu state to cpu @vcpu: The vcpu pointer

Check if the PMU has overflowed while we were running in the host, and inject an interrupt if that was the case.

Definition at line 405 of file pmu-emul.c.

406 {
407  kvm_pmu_update_state(vcpu);
408 }
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:352

◆ kvm_pmu_get_counter_value()

u64 kvm_pmu_get_counter_value ( struct kvm_vcpu *  vcpu,
u64  select_idx 
)

kvm_pmu_get_counter_value - get PMU counter value @vcpu: The vcpu pointer @select_idx: The counter index

Definition at line 141 of file pmu-emul.c.

142 {
143  if (!kvm_vcpu_has_pmu(vcpu))
144  return 0;
145 
146  return kvm_pmu_get_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx));
147 }

◆ kvm_pmu_get_pmc_value()

static u64 kvm_pmu_get_pmc_value ( struct kvm_pmc *  pmc)
static

Definition at line 114 of file pmu-emul.c.

115 {
116  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
117  u64 counter, reg, enabled, running;
118 
119  reg = counter_index_to_reg(pmc->idx);
120  counter = __vcpu_sys_reg(vcpu, reg);
121 
122  /*
123  * The real counter value is equal to the value of counter register plus
124  * the value perf event counts.
125  */
126  if (pmc->perf_event)
127  counter += perf_event_read_value(pmc->perf_event, &enabled,
128  &running);
129 
130  if (!kvm_pmc_is_64bit(pmc))
131  counter = lower_32_bits(counter);
132 
133  return counter;
134 }

◆ kvm_pmu_get_pmceid()

u64 kvm_pmu_get_pmceid ( struct kvm_vcpu *  vcpu,
bool  pmceid1 
)

Definition at line 760 of file pmu-emul.c.

761 {
762  unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
763  u64 val, mask = 0;
764  int base, i, nr_events;
765 
766  if (!kvm_vcpu_has_pmu(vcpu))
767  return 0;
768 
769  if (!pmceid1) {
770  val = read_sysreg(pmceid0_el0);
771  /* always support CHAIN */
772  val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
773  base = 0;
774  } else {
775  val = read_sysreg(pmceid1_el0);
776  /*
777  * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
778  * as RAZ
779  */
780  val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
781  BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
782  BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
783  base = 32;
784  }
785 
786  if (!bmap)
787  return val;
788 
789  nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
790 
791  for (i = 0; i < 32; i += 8) {
792  u64 byte;
793 
794  byte = bitmap_get_value8(bmap, base + i);
795  mask |= byte << i;
796  if (nr_events >= (0x4000 + base + 32)) {
797  byte = bitmap_get_value8(bmap, 0x4000 + base + i);
798  mask |= byte << (32 + i);
799  }
800  }
801 
802  return val & mask;
803 }
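The 0x4000 offset corresponds to the extended common event range: bits [63:32] of PMCEID0_EL0 and PMCEID1_EL0 advertise events 0x4000-0x401F and 0x4020-0x403F respectively, so the filter bitmap is consulted a second time at base + 0x4000 whenever nr_events covers that range.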

◆ kvm_pmu_handle_pmcr()

void kvm_pmu_handle_pmcr ( struct kvm_vcpu *  vcpu,
u64  val 
)

kvm_pmu_handle_pmcr - handle the PMCR register @vcpu: The vcpu pointer @val: the value the guest writes to the PMCR register

Definition at line 551 of file pmu-emul.c.

552 {
553  int i;
554 
555  if (!kvm_vcpu_has_pmu(vcpu))
556  return;
557 
558  /* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
559  if (!kvm_pmu_is_3p5(vcpu))
560  val &= ~ARMV8_PMU_PMCR_LP;
561 
562  /* The reset bits don't indicate any state, and shouldn't be saved. */
563  __vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
564 
565  if (val & ARMV8_PMU_PMCR_E) {
566  kvm_pmu_enable_counter_mask(vcpu,
567  __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
568  } else {
569  kvm_pmu_disable_counter_mask(vcpu,
570  __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
571  }
572 
573  if (val & ARMV8_PMU_PMCR_C)
574  kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
575 
576  if (val & ARMV8_PMU_PMCR_P) {
577  unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
578  mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
579  for_each_set_bit(i, &mask, 32)
580  kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
581  }
582  kvm_vcpu_pmu_restore_guest(vcpu);
583 }
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:268
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
Definition: pmu-emul.c:319
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
Definition: pmu-emul.c:285
static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force)
Definition: pmu-emul.c:149
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
Definition: pmu-emul.c:182
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
Definition: pmu.c:176

◆ kvm_pmu_overflow_status()

static u64 kvm_pmu_overflow_status ( struct kvm_vcpu *  vcpu)
static

Definition at line 339 of file pmu-emul.c.

340 {
341  u64 reg = 0;
342 
343  if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
344  reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
345  reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
346  reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
347  }
348 
349  return reg;
350 }

◆ kvm_pmu_perf_overflow()

static void kvm_pmu_perf_overflow ( struct perf_event *  perf_event,
struct perf_sample_data *  data,
struct pt_regs *  regs 
)
static

When the perf event overflows, set the overflow status and inform the vcpu.

Definition at line 496 of file pmu-emul.c.

499 {
500  struct kvm_pmc *pmc = perf_event->overflow_handler_context;
501  struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
502  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
503  int idx = pmc->idx;
504  u64 period;
505 
506  cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
507 
508  /*
509  * Reset the sample period to the architectural limit,
510  * i.e. the point where the counter overflows.
511  */
512  period = compute_period(pmc, local64_read(&perf_event->count));
513 
514  local64_set(&perf_event->hw.period_left, 0);
515  perf_event->attr.sample_period = period;
516  perf_event->hw.sample_period = period;
517 
518  __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
519 
520  if (kvm_pmu_counter_can_chain(pmc))
521  kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
522  ARMV8_PMUV3_PERFCTR_CHAIN);
523 
524  if (kvm_pmu_overflow_status(vcpu)) {
525  kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
526 
527  if (!in_nmi())
528  kvm_vcpu_kick(vcpu);
529  else
530  irq_work_queue(&vcpu->arch.pmu.overflow_work);
531  }
532 
533  cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
534 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
Definition: pmu-emul.c:339

◆ kvm_pmu_perf_overflow_notify_vcpu()

static void kvm_pmu_perf_overflow_notify_vcpu ( struct irq_work *  work)
static

When the perf interrupt is an NMI, we cannot safely notify the vcpu corresponding to the event. This is why we need a callback to do it once outside of the NMI context.

Definition at line 427 of file pmu-emul.c.

428 {
429  struct kvm_vcpu *vcpu;
430 
431  vcpu = container_of(work, struct kvm_vcpu, arch.pmu.overflow_work);
432  kvm_vcpu_kick(vcpu);
433 }

◆ kvm_pmu_probe_armpmu()

static struct arm_pmu* kvm_pmu_probe_armpmu ( void  )
static

Definition at line 720 of file pmu-emul.c.

721 {
722  struct arm_pmu *tmp, *pmu = NULL;
723  struct arm_pmu_entry *entry;
724  int cpu;
725 
726  mutex_lock(&arm_pmus_lock);
727 
728  /*
729  * It is safe to use a stale cpu to iterate the list of PMUs so long as
730  * the same value is used for the entirety of the loop. Given this, and
731  * the fact that no percpu data is used for the lookup there is no need
732  * to disable preemption.
733  *
734  * It is still necessary to get a valid cpu, though, to probe for the
735  * default PMU instance as userspace is not required to specify a PMU
736  * type. In order to uphold the preexisting behavior KVM selects the
737  * PMU instance for the core during vcpu init. A dependent use
738  * case would be a user with disdain of all things big.LITTLE that
739  * affines the VMM to a particular cluster of cores.
740  *
741  * In any case, userspace should just do the sane thing and use the UAPI
742  * to select a PMU type directly. But, be wary of the baggage being
743  * carried here.
744  */
745  cpu = raw_smp_processor_id();
746  list_for_each_entry(entry, &arm_pmus, entry) {
747  tmp = entry->arm_pmu;
748 
749  if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
750  pmu = tmp;
751  break;
752  }
753  }
754 
755  mutex_unlock(&arm_pmus_lock);
756 
757  return pmu;
758 }

◆ kvm_pmu_release_perf_event()

static void kvm_pmu_release_perf_event ( struct kvm_pmc *  pmc)
static

kvm_pmu_release_perf_event - remove the perf event @pmc: The PMU counter pointer

Definition at line 194 of file pmu-emul.c.

195 {
196  if (pmc->perf_event) {
197  perf_event_disable(pmc->perf_event);
198  perf_event_release_kernel(pmc->perf_event);
199  pmc->perf_event = NULL;
200  }
201 }

◆ kvm_pmu_set_counter_event_type()

void kvm_pmu_set_counter_event_type ( struct kvm_vcpu *  vcpu,
u64  data,
u64  select_idx 
)

kvm_pmu_set_counter_event_type - set selected counter to monitor some event @vcpu: The vcpu pointer @data: The data the guest writes to PMXEVTYPER_EL0 @select_idx: The index of the selected counter

When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an event with the given hardware event number. Here we call the perf_event API to emulate this action and create a kernel perf event for it.

Definition at line 678 of file pmu-emul.c.

680 {
681  struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx);
682  u64 reg;
683 
684  if (!kvm_vcpu_has_pmu(vcpu))
685  return;
686 
687  reg = counter_index_to_evtreg(pmc->idx);
688  __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm);
689 
690  kvm_pmu_create_perf_event(pmc);
691 }
u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
Definition: pmu-emul.c:63

◆ kvm_pmu_set_counter_value()

void kvm_pmu_set_counter_value ( struct kvm_vcpu *  vcpu,
u64  select_idx,
u64  val 
)

kvm_pmu_set_counter_value - set PMU counter value @vcpu: The vcpu pointer @select_idx: The counter index @val: The counter value

Definition at line 182 of file pmu-emul.c.

183 {
184  if (!kvm_vcpu_has_pmu(vcpu))
185  return;
186 
187  kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, select_idx), val, false);
188 }

◆ kvm_pmu_set_pmc_value()

static void kvm_pmu_set_pmc_value ( struct kvm_pmc *  pmc,
u64  val,
bool  force 
)
static

Definition at line 149 of file pmu-emul.c.

150 {
151  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
152  u64 reg;
153 
154  kvm_pmu_release_perf_event(pmc);
155 
156  reg = counter_index_to_reg(pmc->idx);
157 
158  if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX &&
159  !force) {
160  /*
161  * Even with PMUv3p5, AArch32 cannot write to the top
162  * 32bit of the counters. The only possible course of
163  * action is to use PMCR.P, which will reset them to
164  * 0 (the only use of the 'force' parameter).
165  */
166  val = __vcpu_sys_reg(vcpu, reg) & GENMASK(63, 32);
167  val |= lower_32_bits(val);
168  }
169 
170  __vcpu_sys_reg(vcpu, reg) = val;
171 
172  /* Recreate the perf event to reflect the updated sample_period */
173  kvm_pmu_create_perf_event(pmc);
174 }
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
Definition: pmu-emul.c:194

◆ kvm_pmu_should_notify_user()

bool kvm_pmu_should_notify_user ( struct kvm_vcpu *  vcpu)

Definition at line 373 of file pmu-emul.c.

374 {
375  struct kvm_pmu *pmu = &vcpu->arch.pmu;
376  struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
377  bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;
378 
379  if (likely(irqchip_in_kernel(vcpu->kvm)))
380  return false;
381 
382  return pmu->irq_level != run_level;
383 }

◆ kvm_pmu_software_increment()

void kvm_pmu_software_increment ( struct kvm_vcpu *  vcpu,
u64  val 
)

kvm_pmu_software_increment - do software increment @vcpu: The vcpu pointer @val: the value the guest writes to the PMSWINC register

Definition at line 541 of file pmu-emul.c.

542 {
543  kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
544 }

◆ kvm_pmu_stop_counter()

static void kvm_pmu_stop_counter ( struct kvm_pmc *  pmc)
static

kvm_pmu_stop_counter - stop PMU counter @pmc: The PMU counter pointer

If this counter has been configured to monitor some event, release it here.

Definition at line 209 of file pmu-emul.c.

210 {
211  struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
212  u64 reg, val;
213 
214  if (!pmc->perf_event)
215  return;
216 
217  val = kvm_pmu_get_pmc_value(pmc);
218 
219  reg = counter_index_to_reg(pmc->idx);
220 
221  __vcpu_sys_reg(vcpu, reg) = val;
222 
223  kvm_pmu_release_perf_event(pmc);
224 }

◆ kvm_pmu_sync_hwstate()

void kvm_pmu_sync_hwstate ( struct kvm_vcpu *  vcpu)

kvm_pmu_sync_hwstate - sync pmu state from cpu @vcpu: The vcpu pointer

Check if the PMU has overflowed while we were running in the guest, and inject an interrupt if that was the case.

Definition at line 417 of file pmu-emul.c.

418 {
419  kvm_pmu_update_state(vcpu);
420 }

◆ kvm_pmu_update_run()

void kvm_pmu_update_run ( struct kvm_vcpu *  vcpu)

Definition at line 388 of file pmu-emul.c.

389 {
390  struct kvm_sync_regs *regs = &vcpu->run->s.regs;
391 
392  /* Populate the timer bitmap for user space */
393  regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
394  if (vcpu->arch.pmu.irq_level)
395  regs->device_irq_level |= KVM_ARM_DEV_PMU;
396 }

◆ kvm_pmu_update_state()

static void kvm_pmu_update_state ( struct kvm_vcpu *  vcpu)
static

Definition at line 352 of file pmu-emul.c.

353 {
354  struct kvm_pmu *pmu = &vcpu->arch.pmu;
355  bool overflow;
356 
357  if (!kvm_vcpu_has_pmu(vcpu))
358  return;
359 
360  overflow = !!kvm_pmu_overflow_status(vcpu);
361  if (pmu->irq_level == overflow)
362  return;
363 
364  pmu->irq_level = overflow;
365 
366  if (likely(irqchip_in_kernel(vcpu->kvm))) {
367  int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu,
368  pmu->irq_num, overflow, pmu);
369  WARN_ON(ret);
370  }
371 }
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned int intid, bool level, void *owner)
Definition: vgic.c:439

◆ kvm_pmu_valid_counter_mask()

u64 kvm_pmu_valid_counter_mask ( struct kvm_vcpu *  vcpu)

Definition at line 268 of file pmu-emul.c.

269 {
270  u64 val = FIELD_GET(ARMV8_PMU_PMCR_N, kvm_vcpu_read_pmcr(vcpu));
271 
272  if (val == 0)
273  return BIT(ARMV8_PMU_CYCLE_IDX);
274  else
275  return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
276 }
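For example, with PMCR_EL0.N == 6 this returns GENMASK(5, 0) | BIT(31) == 0x8000003f: the six general-purpose event counters plus the cycle counter. With N == 0, only the cycle counter bit (bit 31) remains valid.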

◆ kvm_pmu_vcpu_destroy()

void kvm_pmu_vcpu_destroy ( struct kvm_vcpu *  vcpu)

kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vCPU @vcpu: The vcpu pointer

Definition at line 259 of file pmu-emul.c.

260 {
261  int i;
262 
263  for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
264  kvm_pmu_release_perf_event(kvm_vcpu_idx_to_pmc(vcpu, i));
265  irq_work_sync(&vcpu->arch.pmu.overflow_work);
266 }

◆ kvm_pmu_vcpu_init()

void kvm_pmu_vcpu_init ( struct kvm_vcpu *  vcpu)

kvm_pmu_vcpu_init - assign PMU counter indices for this vCPU @vcpu: The vcpu pointer

Definition at line 231 of file pmu-emul.c.

232 {
233  int i;
234  struct kvm_pmu *pmu = &vcpu->arch.pmu;
235 
236  for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
237  pmu->pmc[i].idx = i;
238 }

◆ kvm_pmu_vcpu_reset()

void kvm_pmu_vcpu_reset ( struct kvm_vcpu *  vcpu)

kvm_pmu_vcpu_reset - reset the PMU state for this vCPU @vcpu: The vcpu pointer

Definition at line 245 of file pmu-emul.c.

246 {
247  unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
248  int i;
249 
250  for_each_set_bit(i, &mask, 32)
251  kvm_pmu_stop_counter(kvm_vcpu_idx_to_pmc(vcpu, i));
252 }

◆ kvm_vcpu_idx_to_pmc()

static struct kvm_pmc* kvm_vcpu_idx_to_pmc ( struct kvm_vcpu *  vcpu,
int  cnt_idx 
)
static

Definition at line 34 of file pmu-emul.c.

35 {
36  return &vcpu->arch.pmu.pmc[cnt_idx];
37 }

◆ kvm_vcpu_read_pmcr()

u64 kvm_vcpu_read_pmcr ( struct kvm_vcpu *  vcpu)

kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU @vcpu: The vcpu pointer

Definition at line 1136 of file pmu-emul.c.

1137 {
1138  u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
1139 
1140  return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
1141 }
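A minimal sketch of the field splice performed by u64_replace_bits() above, assuming only that PMCR_EL0.N occupies bits [15:11] (the ARMV8_PMU_PMCR_N field):

#include <stdint.h>

#define PMCR_N_SHIFT	11
#define PMCR_N_MASK	(0x1fULL << PMCR_N_SHIFT)	/* PMCR_EL0.N */

/* Splice the VM-wide counter count into the guest-visible N field,
 * leaving every other PMCR_EL0 bit untouched. */
static uint64_t pmcr_with_n(uint64_t pmcr, uint64_t pmcr_n)
{
	return (pmcr & ~PMCR_N_MASK) |
	       ((pmcr_n << PMCR_N_SHIFT) & PMCR_N_MASK);
}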

◆ kvm_vcpu_reload_pmu()

void kvm_vcpu_reload_pmu ( struct kvm_vcpu *  vcpu)

Definition at line 805 of file pmu-emul.c.

806 {
807  u64 mask = kvm_pmu_valid_counter_mask(vcpu);
808 
809  kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
810 
811  __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
812  __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
813  __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
814 }
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
Definition: pmu-emul.c:551

◆ LIST_HEAD()

static LIST_HEAD ( arm_pmus  )
static

◆ pmu_irq_is_valid()

static bool pmu_irq_is_valid ( struct kvm *  kvm,
int  irq 
)
static

Definition at line 883 of file pmu-emul.c.

884 {
885  unsigned long i;
886  struct kvm_vcpu *vcpu;
887 
888  kvm_for_each_vcpu(i, vcpu, kvm) {
889  if (!kvm_arm_pmu_irq_initialized(vcpu))
890  continue;
891 
892  if (irq_is_ppi(irq)) {
893  if (vcpu->arch.pmu.irq_num != irq)
894  return false;
895  } else {
896  if (vcpu->arch.pmu.irq_num == irq)
897  return false;
898  }
899  }
900 
901  return true;
902 }
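In other words, the PMU overflow interrupt must be the same PPI on every vCPU (PPIs are private to each CPU interface), whereas an SPI must be unique to its vCPU, since each vCPU needs its own dedicated overflow line.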