KVM
Classes | Macros | Functions | Variables
pmu.h File Reference
#include <linux/nospec.h>
Include dependency graph for pmu.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

struct  kvm_pmu_ops
 

Macros

#define vcpu_to_pmu(vcpu)   (&(vcpu)->arch.pmu)
 
#define pmu_to_vcpu(pmu)   (container_of((pmu), struct kvm_vcpu, arch.pmu))
 
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
 
#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK
 
#define fixed_ctrl_field(ctrl_reg, idx)   (((ctrl_reg) >> ((idx)*4)) & 0xf)
 
#define VMWARE_BACKDOOR_PMC_HOST_TSC   0x10000
 
#define VMWARE_BACKDOOR_PMC_REAL_TIME   0x10001
 
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME   0x10002
 

Functions

void kvm_pmu_ops_update (const struct kvm_pmu_ops *pmu_ops)
 
static bool kvm_pmu_has_perf_global_ctrl (struct kvm_pmu *pmu)
 
static u64 pmc_bitmask (struct kvm_pmc *pmc)
 
static u64 pmc_read_counter (struct kvm_pmc *pmc)
 
void pmc_write_counter (struct kvm_pmc *pmc, u64 val)
 
static bool pmc_is_gp (struct kvm_pmc *pmc)
 
static bool pmc_is_fixed (struct kvm_pmc *pmc)
 
static bool kvm_valid_perf_global_ctrl (struct kvm_pmu *pmu, u64 data)
 
static struct kvm_pmc * get_gp_pmc (struct kvm_pmu *pmu, u32 msr, u32 base)
 
static struct kvm_pmc * get_fixed_pmc (struct kvm_pmu *pmu, u32 msr)
 
static bool pmc_speculative_in_use (struct kvm_pmc *pmc)
 
static void kvm_init_pmu_capability (const struct kvm_pmu_ops *pmu_ops)
 
static void kvm_pmu_request_counter_reprogram (struct kvm_pmc *pmc)
 
static void reprogram_counters (struct kvm_pmu *pmu, u64 diff)
 
static bool pmc_is_globally_enabled (struct kvm_pmc *pmc)
 
void kvm_pmu_deliver_pmi (struct kvm_vcpu *vcpu)
 
void kvm_pmu_handle_event (struct kvm_vcpu *vcpu)
 
int kvm_pmu_rdpmc (struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 
bool kvm_pmu_is_valid_rdpmc_ecx (struct kvm_vcpu *vcpu, unsigned int idx)
 
bool kvm_pmu_is_valid_msr (struct kvm_vcpu *vcpu, u32 msr)
 
int kvm_pmu_get_msr (struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
int kvm_pmu_set_msr (struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
void kvm_pmu_refresh (struct kvm_vcpu *vcpu)
 
void kvm_pmu_init (struct kvm_vcpu *vcpu)
 
void kvm_pmu_cleanup (struct kvm_vcpu *vcpu)
 
void kvm_pmu_destroy (struct kvm_vcpu *vcpu)
 
int kvm_vm_ioctl_set_pmu_event_filter (struct kvm *kvm, void __user *argp)
 
void kvm_pmu_trigger_event (struct kvm_vcpu *vcpu, u64 perf_hw_id)
 
bool is_vmware_backdoor_pmc (u32 pmc_idx)
 

Variables

struct x86_pmu_capability kvm_pmu_cap
 
struct kvm_pmu_ops intel_pmu_ops
 
struct kvm_pmu_ops amd_pmu_ops
 

Macro Definition Documentation

◆ fixed_ctrl_field

#define fixed_ctrl_field(ctrl_reg, idx)    (((ctrl_reg) >> ((idx)*4)) & 0xf)

Definition at line 15 of file pmu.h.
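
For illustration, a minimal stand-alone sketch of the same bit arithmetic. The register value is hypothetical, but the layout matches IA32_FIXED_CTR_CTRL, where each fixed counter owns a 4-bit control field (bit 0 enables ring-0 counting, bit 1 ring-3 counting):

#include <stdint.h>
#include <stdio.h>

#define fixed_ctrl_field(ctrl_reg, idx)   (((ctrl_reg) >> ((idx)*4)) & 0xf)

int main(void)
{
	uint64_t fixed_ctr_ctrl = 0x021;	/* hypothetical IA32_FIXED_CTR_CTRL value */
	int idx;

	for (idx = 0; idx < 3; idx++)
		printf("fixed counter %d: ctrl field = 0x%llx\n", idx,
		       (unsigned long long)fixed_ctrl_field(fixed_ctr_ctrl, idx));
	return 0;	/* prints 0x1 (ring 0), 0x2 (ring 3), 0x0 (disabled) */
}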

◆ MSR_IA32_MISC_ENABLE_PMU_RO_MASK

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK
Value:
(MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

Definition at line 11 of file pmu.h.

◆ pmc_to_pmu

#define pmc_to_pmu (   pmc)    (&(pmc)->vcpu->arch.pmu)

Definition at line 9 of file pmu.h.

◆ pmu_to_vcpu

#define pmu_to_vcpu (   pmu)    (container_of((pmu), struct kvm_vcpu, arch.pmu))

Definition at line 8 of file pmu.h.

◆ vcpu_to_pmu

#define vcpu_to_pmu (   vcpu)    (&(vcpu)->arch.pmu)

Definition at line 7 of file pmu.h.
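
The three accessors above are inverses of one another. A stand-alone sketch of the container_of() round trip, using simplified stand-in structures rather than the real KVM definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins, only to show how the macros invert each other. */
struct kvm_pmu { int version; };
struct kvm_vcpu { struct { struct kvm_pmu pmu; } arch; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define vcpu_to_pmu(vcpu)   (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)    (container_of((pmu), struct kvm_vcpu, arch.pmu))

int main(void)
{
	struct kvm_vcpu vcpu = { .arch = { .pmu = { .version = 2 } } };
	struct kvm_pmu *pmu = vcpu_to_pmu(&vcpu);

	/* pmu_to_vcpu() recovers the enclosing vcpu from the embedded pmu. */
	printf("round trip ok: %d\n", pmu_to_vcpu(pmu) == &vcpu);
	return 0;
}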

◆ VMWARE_BACKDOOR_PMC_APPARENT_TIME

#define VMWARE_BACKDOOR_PMC_APPARENT_TIME   0x10002

Definition at line 19 of file pmu.h.

◆ VMWARE_BACKDOOR_PMC_HOST_TSC

#define VMWARE_BACKDOOR_PMC_HOST_TSC   0x10000

Definition at line 17 of file pmu.h.

◆ VMWARE_BACKDOOR_PMC_REAL_TIME

#define VMWARE_BACKDOOR_PMC_REAL_TIME   0x10001

Definition at line 18 of file pmu.h.

Function Documentation

◆ get_fixed_pmc()

static struct kvm_pmc* get_fixed_pmc ( struct kvm_pmu *  pmu,
u32  msr 
)
inline static

Definition at line 114 of file pmu.h.

115 {
116  int base = MSR_CORE_PERF_FIXED_CTR0;
117 
118  if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
119  u32 index = array_index_nospec(msr - base,
120  pmu->nr_arch_fixed_counters);
121 
122  return &pmu->fixed_counters[index];
123  }
124 
125  return NULL;
126 }
Here is the caller graph for this function:

◆ get_gp_pmc()

static struct kvm_pmc* get_gp_pmc ( struct kvm_pmu *  pmu,
u32  msr,
u32  base 
)
inline static

Definition at line 100 of file pmu.h.

102 {
103  if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
104  u32 index = array_index_nospec(msr - base,
105  pmu->nr_arch_gp_counters);
106 
107  return &pmu->gp_counters[index];
108  }
109 
110  return NULL;
111 }
Here is the caller graph for this function:
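
Both helpers apply the same range check: an MSR in [base, base + nr_counters) maps to counter index (msr - base), and the kernel clamps that index with array_index_nospec() to block speculative out-of-bounds loads. A stand-alone sketch of the index math (0xc1 and 0x309 are the architectural MSR_IA32_PERFCTR0 and MSR_CORE_PERF_FIXED_CTR0 numbers; the counter counts are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same bounds check as above, without the array_index_nospec() clamp. */
static int msr_to_index(uint32_t msr, uint32_t base, uint32_t nr_counters)
{
	if (msr >= base && msr < base + nr_counters)
		return msr - base;
	return -1;	/* not one of these counters */
}

int main(void)
{
	const uint32_t MSR_IA32_PERFCTR0 = 0xc1;		/* GP counter base */
	const uint32_t MSR_CORE_PERF_FIXED_CTR0 = 0x309;	/* fixed counter base */

	printf("0xc3   -> GP index %d\n", msr_to_index(0xc3, MSR_IA32_PERFCTR0, 8));
	printf("0x30a  -> fixed index %d\n", msr_to_index(0x30a, MSR_CORE_PERF_FIXED_CTR0, 3));
	printf("0x1234 -> %d (miss)\n", msr_to_index(0x1234, MSR_IA32_PERFCTR0, 8));
	return 0;
}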

◆ is_vmware_backdoor_pmc()

bool is_vmware_backdoor_pmc ( u32  pmc_idx)

Definition at line 534 of file pmu.c.

535 {
536  switch (pmc_idx) {
537  case VMWARE_BACKDOOR_PMC_HOST_TSC:
538  case VMWARE_BACKDOOR_PMC_REAL_TIME:
539  case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
540  return true;
541  }
542  return false;
543 }
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME
Definition: pmu.h:19
#define VMWARE_BACKDOOR_PMC_HOST_TSC
Definition: pmu.h:17
#define VMWARE_BACKDOOR_PMC_REAL_TIME
Definition: pmu.h:18
Here is the caller graph for this function:

◆ kvm_init_pmu_capability()

static void kvm_init_pmu_capability ( const struct kvm_pmu_ops *  pmu_ops)
inline static

Definition at line 141 of file pmu.h.

142 {
143  bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
144  int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
145 
146  /*
147  * Hybrid PMUs don't play nice with virtualization without careful
148  * configuration by userspace, and KVM's APIs for reporting supported
149  * vPMU features do not account for hybrid PMUs. Disable vPMU support
150  * for hybrid PMUs until KVM gains a way to let userspace opt-in.
151  */
152  if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
153  enable_pmu = false;
154 
155  if (enable_pmu) {
156  perf_get_x86_pmu_capability(&kvm_pmu_cap);
157 
158  /*
159  * WARN if perf did NOT disable hardware PMU if the number of
160  * architecturally required GP counters aren't present, i.e. if
161  * there are a non-zero number of counters, but fewer than what
162  * is architecturally required.
163  */
164  if (!kvm_pmu_cap.num_counters_gp ||
165  WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
166  enable_pmu = false;
167  else if (is_intel && !kvm_pmu_cap.version)
168  enable_pmu = false;
169  }
170 
171  if (!enable_pmu) {
172  memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
173  return;
174  }
175 
176  kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
177  kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
178  pmu_ops->MAX_NR_GP_COUNTERS);
179  kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
180  KVM_PMC_MAX_FIXED);
181 }
struct x86_pmu_capability kvm_pmu_cap
Definition: pmu.c:29
const int MAX_NR_GP_COUNTERS
Definition: pmu.h:38
const int MIN_NR_GP_COUNTERS
Definition: pmu.h:39
bool __read_mostly enable_pmu
Definition: x86.c:192
Here is the caller graph for this function:

◆ kvm_pmu_cleanup()

void kvm_pmu_cleanup ( struct kvm_vcpu *  vcpu)

Definition at line 767 of file pmu.c.

768 {
769  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
770  struct kvm_pmc *pmc = NULL;
771  DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
772  int i;
773 
774  pmu->need_cleanup = false;
775 
776  bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
777  pmu->pmc_in_use, X86_PMC_IDX_MAX);
778 
779  for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
780  pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
781 
782  if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
783  pmc_stop_counter(pmc);
784  }
785 
786  static_call_cond(kvm_x86_pmu_cleanup)(vcpu);
787 
788  bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
789 }
static void pmc_stop_counter(struct kvm_pmc *pmc)
Definition: pmu.c:285
static bool pmc_speculative_in_use(struct kvm_pmc *pmc)
Definition: pmu.h:128
#define vcpu_to_pmu(vcpu)
Definition: pmu.h:7
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_deliver_pmi()

void kvm_pmu_deliver_pmi ( struct kvm_vcpu *  vcpu)

Definition at line 594 of file pmu.c.

595 {
596  if (lapic_in_kernel(vcpu)) {
597  static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
598  kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
599  }
600 }
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
Definition: lapic.c:2762
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
Definition: lapic.h:186
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_destroy()

void kvm_pmu_destroy ( struct kvm_vcpu *  vcpu)

Definition at line 791 of file pmu.c.

792 {
793  kvm_pmu_reset(vcpu);
794 }
static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
Definition: pmu.c:709
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_get_msr()

int kvm_pmu_get_msr ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info 
)

Definition at line 625 of file pmu.c.

626 {
627  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
628  u32 msr = msr_info->index;
629 
630  switch (msr) {
631  case MSR_CORE_PERF_GLOBAL_STATUS:
632  case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
633  msr_info->data = pmu->global_status;
634  break;
635  case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
636  case MSR_CORE_PERF_GLOBAL_CTRL:
637  msr_info->data = pmu->global_ctrl;
638  break;
639  case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
640  case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
641  msr_info->data = 0;
642  break;
643  default:
644  return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
645  }
646 
647  return 0;
648 }
Here is the caller graph for this function:
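
From host user space these MSRs are reachable through the generic KVM_GET_MSRS vCPU ioctl. A hedged sketch, assuming an already-created vCPU file descriptor and omitting error handling:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e	/* architectural MSR number; not in the uapi headers */

/* Read IA32_PERF_GLOBAL_STATUS of a vCPU through KVM_GET_MSRS; on the
 * kernel side the request is routed to kvm_pmu_get_msr(). */
static uint64_t read_global_status(int vcpu_fd)
{
	struct kvm_msrs *msrs;
	uint64_t val;

	msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
	msrs->nmsrs = 1;
	msrs->entries[0].index = MSR_CORE_PERF_GLOBAL_STATUS;

	ioctl(vcpu_fd, KVM_GET_MSRS, msrs);	/* returns the number of MSRs read */

	val = msrs->entries[0].data;
	free(msrs);
	return val;
}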

◆ kvm_pmu_handle_event()

void kvm_pmu_handle_event ( struct kvm_vcpu *  vcpu)

Definition at line 503 of file pmu.c.

504 {
505  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
506  int bit;
507 
508  for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
509  struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
510 
511  if (unlikely(!pmc)) {
512  clear_bit(bit, pmu->reprogram_pmi);
513  continue;
514  }
515 
516  reprogram_counter(pmc);
517  }
518 
519  /*
520  * Unused perf_events are only released if the corresponding MSRs
521  * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
522  * triggers KVM_REQ_PMU if cleanup is needed.
523  */
524  if (unlikely(pmu->need_cleanup))
525  kvm_pmu_cleanup(vcpu);
526 }
static void reprogram_counter(struct kvm_pmc *pmc)
Definition: pmu.c:448
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
Definition: pmu.c:767
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_has_perf_global_ctrl()

static bool kvm_pmu_has_perf_global_ctrl ( struct kvm_pmu *  pmu)
inline static

Definition at line 44 of file pmu.h.

45 {
46  /*
47  * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
48  * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
49  * greater than zero. However, KVM only exposes and emulates the MSR
50  * to/for the guest if the guest PMU supports at least "Architectural
51  * Performance Monitoring Version 2".
52  *
53  * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
54  */
55  return pmu->version > 1;
56 }
Here is the caller graph for this function:

◆ kvm_pmu_init()

void kvm_pmu_init ( struct kvm_vcpu *  vcpu)

Definition at line 757 of file pmu.c.

758 {
759  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
760 
761  memset(pmu, 0, sizeof(*pmu));
762  static_call(kvm_x86_pmu_init)(vcpu);
763  kvm_pmu_refresh(vcpu);
764 }
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
Definition: pmu.c:742
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_is_valid_msr()

bool kvm_pmu_is_valid_msr ( struct kvm_vcpu *  vcpu,
u32  msr 
)

Definition at line 602 of file pmu.c.

603 {
604  switch (msr) {
605  case MSR_CORE_PERF_GLOBAL_STATUS:
606  case MSR_CORE_PERF_GLOBAL_CTRL:
607  case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
608  return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
609  default:
610  break;
611  }
612  return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
613  static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
614 }
static bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
Definition: pmu.h:44
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_is_valid_rdpmc_ecx()

bool kvm_pmu_is_valid_rdpmc_ecx ( struct kvm_vcpu *  vcpu,
unsigned int  idx 
)

Definition at line 529 of file pmu.c.

530 {
531  return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
532 }
Here is the caller graph for this function:

◆ kvm_pmu_ops_update()

void kvm_pmu_ops_update ( const struct kvm_pmu_ops *  pmu_ops)

Definition at line 83 of file pmu.c.

84 {
85  memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));
86 
87 #define __KVM_X86_PMU_OP(func) \
88  static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
89 #define KVM_X86_PMU_OP(func) \
90  WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
91 #define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
92 #include <asm/kvm-x86-pmu-ops.h>
93 #undef __KVM_X86_PMU_OP
94 }
Here is the caller graph for this function:

◆ kvm_pmu_rdpmc()

int kvm_pmu_rdpmc ( struct kvm_vcpu *  vcpu,
unsigned  pmc,
u64 *  data 
)

Definition at line 568 of file pmu.c.

569 {
570  bool fast_mode = idx & (1u << 31);
571  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
572  struct kvm_pmc *pmc;
573  u64 mask = fast_mode ? ~0u : ~0ull;
574 
575  if (!pmu->version)
576  return 1;
577 
578  if (is_vmware_backdoor_pmc(idx))
579  return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
580 
581  pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
582  if (!pmc)
583  return 1;
584 
585  if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
586  (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
587  kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
588  return 1;
589 
590  *data = pmc_read_counter(pmc) & mask;
591  return 0;
592 }
static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu, unsigned long cr0_bit)
static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu, unsigned long cr4_bit)
bool is_vmware_backdoor_pmc(u32 pmc_idx)
Definition: pmu.c:534
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
Definition: pmu.c:545
static u64 pmc_read_counter(struct kvm_pmc *pmc)
Definition: pmu.h:65
Here is the call graph for this function:
Here is the caller graph for this function:
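
This path is reached when the guest executes RDPMC. A guest-side sketch of the instruction wrapper; per the code above, bit 31 of the index selects the 32-bit "fast" read, and the 0x1000x indices are the VMware backdoor pseudo-counters:

#include <stdint.h>

/* Guest-side RDPMC wrapper: ECX selects the counter, EDX:EAX returns it.
 * Needs CPL 0 or CR4.PCE set, otherwise kvm_pmu_rdpmc() injects #GP. */
static inline uint64_t rdpmc(uint32_t idx)
{
	uint32_t lo, hi;

	asm volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(idx));
	return ((uint64_t)hi << 32) | lo;
}

/* Hypothetical calls:
 *   rdpmc(0)               - GP counter 0, full width
 *   rdpmc(0 | (1u << 31))  - GP counter 0, low 32 bits only ("fast" mode)
 *   rdpmc(0x10000)         - VMWARE_BACKDOOR_PMC_HOST_TSC, if the VMware
 *                            backdoor is enabled in KVM
 */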

◆ kvm_pmu_refresh()

void kvm_pmu_refresh ( struct kvm_vcpu *  vcpu)

Definition at line 742 of file pmu.c.

743 {
744  if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
745  return;
746 
747  /*
748  * Stop/release all existing counters/events before realizing the new
749  * vPMU model.
750  */
751  kvm_pmu_reset(vcpu);
752 
753  bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
754  static_call(kvm_x86_pmu_refresh)(vcpu);
755 }
static bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
Definition: x86.h:95
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_pmu_request_counter_reprogram()

static void kvm_pmu_request_counter_reprogram ( struct kvm_pmc *  pmc)
inline static

Definition at line 183 of file pmu.h.

184 {
185  set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
186  kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
187 }
#define pmc_to_pmu(pmc)
Definition: pmu.h:9
Here is the caller graph for this function:

◆ kvm_pmu_set_msr()

int kvm_pmu_set_msr ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info 
)

Definition at line 650 of file pmu.c.

651 {
652  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
653  u32 msr = msr_info->index;
654  u64 data = msr_info->data;
655  u64 diff;
656 
657  /*
658  * Note, AMD ignores writes to reserved bits and read-only PMU MSRs,
659  * whereas Intel generates #GP on attempts to write reserved/RO MSRs.
660  */
661  switch (msr) {
662  case MSR_CORE_PERF_GLOBAL_STATUS:
663  if (!msr_info->host_initiated)
664  return 1; /* RO MSR */
665  fallthrough;
666  case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
667  /* Per PPR, Read-only MSR. Writes are ignored. */
668  if (!msr_info->host_initiated)
669  break;
670 
671  if (data & pmu->global_status_mask)
672  return 1;
673 
674  pmu->global_status = data;
675  break;
676  case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
677  data &= ~pmu->global_ctrl_mask;
678  fallthrough;
679  case MSR_CORE_PERF_GLOBAL_CTRL:
680  if (!kvm_valid_perf_global_ctrl(pmu, data))
681  return 1;
682 
683  if (pmu->global_ctrl != data) {
684  diff = pmu->global_ctrl ^ data;
685  pmu->global_ctrl = data;
686  reprogram_counters(pmu, diff);
687  }
688  break;
689  case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
690  /*
691  * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
692  * GLOBAL_STATUS, and so the set of reserved bits is the same.
693  */
694  if (data & pmu->global_status_mask)
695  return 1;
696  fallthrough;
697  case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
698  if (!msr_info->host_initiated)
699  pmu->global_status &= ~data;
700  break;
701  default:
702  kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
703  return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
704  }
705 
706  return 0;
707 }
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
Definition: pmu.c:616
static bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
Definition: pmu.h:90
static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
Definition: pmu.h:189
Here is the call graph for this function:
Here is the caller graph for this function:
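
For the GLOBAL_CTRL case, a guest typically enables counters with a ring-0 WRMSR; when the write is intercepted it lands here, and only the counters whose enable bit actually toggled are reprogrammed. A guest-side sketch (0x38f is the architectural IA32_PERF_GLOBAL_CTRL number):

#include <stdint.h>

#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f

/* Guest ring-0 WRMSR helper: EDX:EAX holds the value, ECX the MSR index. */
static inline void wrmsr64(uint32_t msr, uint64_t val)
{
	asm volatile("wrmsr" : : "c"(msr), "a"((uint32_t)val),
		     "d"((uint32_t)(val >> 32)));
}

static void enable_gp_counters_0_and_1(void)
{
	/* Bits 0 and 1 enable PMC0 and PMC1; the intercepted write is
	 * validated against global_ctrl_mask by kvm_valid_perf_global_ctrl(). */
	wrmsr64(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
}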

◆ kvm_pmu_trigger_event()

void kvm_pmu_trigger_event ( struct kvm_vcpu *  vcpu,
u64  perf_hw_id 
)

Definition at line 828 of file pmu.c.

829 {
830  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
831  struct kvm_pmc *pmc;
832  int i;
833 
834  for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
835  pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
836 
837  if (!pmc || !pmc_event_is_allowed(pmc))
838  continue;
839 
840  /* Ignore checks for edge detect, pin control, invert and CMASK bits */
841  if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
842  kvm_pmu_incr_counter(pmc);
843  }
844 }
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
Definition: pmu.c:796
static bool cpl_is_matched(struct kvm_pmc *pmc)
Definition: pmu.c:809
static bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc, unsigned int perf_hw_id)
Definition: pmu.c:802
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
Definition: pmu.c:441
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_valid_perf_global_ctrl()

static bool kvm_valid_perf_global_ctrl ( struct kvm_pmu *  pmu,
u64  data 
)
inline static

Definition at line 90 of file pmu.h.

92 {
93  return !(pmu->global_ctrl_mask & data);
94 }
Here is the caller graph for this function:
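
pmu->global_ctrl_mask holds the reserved bits of the guest's IA32_PERF_GLOBAL_CTRL, so a write is valid only if it sets no reserved bit. A stand-alone sketch of how such a mask relates to the counter counts (a hypothetical helper, not the code KVM uses to build the real mask):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reserved-bit mask for a vPMU with nr_gp GP counters
 * (bits 0..nr_gp-1) and nr_fixed fixed counters (bits 32..32+nr_fixed-1). */
static uint64_t global_ctrl_reserved(int nr_gp, int nr_fixed)
{
	uint64_t valid = ((1ull << nr_gp) - 1) |
			 (((1ull << nr_fixed) - 1) << 32);

	return ~valid;
}

int main(void)
{
	uint64_t mask = global_ctrl_reserved(4, 3);

	printf("write 0x0000000300000003: %s\n",	/* PMC0-1, FIXED0-1 */
	       (0x0000000300000003ull & mask) ? "rejected" : "accepted");
	printf("write 0x0000001000000000: %s\n",	/* fixed counter 4 does not exist */
	       (0x0000001000000000ull & mask) ? "rejected" : "accepted");
	return 0;
}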

◆ kvm_vm_ioctl_set_pmu_event_filter()

int kvm_vm_ioctl_set_pmu_event_filter ( struct kvm *  kvm,
void __user *  argp 
)

Definition at line 928 of file pmu.c.

929 {
930  struct kvm_pmu_event_filter __user *user_filter = argp;
931  struct kvm_x86_pmu_event_filter *filter;
932  struct kvm_pmu_event_filter tmp;
933  struct kvm_vcpu *vcpu;
934  unsigned long i;
935  size_t size;
936  int r;
937 
938  if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
939  return -EFAULT;
940 
941  if (tmp.action != KVM_PMU_EVENT_ALLOW &&
942  tmp.action != KVM_PMU_EVENT_DENY)
943  return -EINVAL;
944 
945  if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
946  return -EINVAL;
947 
948  if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
949  return -E2BIG;
950 
951  size = struct_size(filter, events, tmp.nevents);
952  filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
953  if (!filter)
954  return -ENOMEM;
955 
956  filter->action = tmp.action;
957  filter->nevents = tmp.nevents;
958  filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
959  filter->flags = tmp.flags;
960 
961  r = -EFAULT;
962  if (copy_from_user(filter->events, user_filter->events,
963  sizeof(filter->events[0]) * filter->nevents))
964  goto cleanup;
965 
966  r = prepare_filter_lists(filter);
967  if (r)
968  goto cleanup;
969 
970  mutex_lock(&kvm->lock);
971  filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
972  mutex_is_locked(&kvm->lock));
973  mutex_unlock(&kvm->lock);
974  synchronize_srcu_expedited(&kvm->srcu);
975 
976  BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
977  sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));
978 
979  kvm_for_each_vcpu(i, vcpu, kvm)
980  atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);
981 
982  kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);
983 
984  r = 0;
985 cleanup:
986  kfree(filter);
987  return r;
988 }
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
Definition: kvm_main.c:340
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS
Definition: pmu.c:27
static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
Definition: pmu.c:892
Here is the call graph for this function:
Here is the caller graph for this function:
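
User space reaches this handler through the KVM_SET_PMU_EVENT_FILTER VM ioctl. A hedged sketch that installs a one-entry allow list (event select 0xc0 is used purely as an example; error handling is minimal and vm_fd is assumed to be an existing VM file descriptor):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Install an allow-list PMU event filter on an existing VM. */
static int set_pmu_filter(int vm_fd)
{
	struct kvm_pmu_event_filter *filter;
	int r;

	filter = calloc(1, sizeof(*filter) + sizeof(filter->events[0]));
	if (!filter)
		return -1;

	filter->action = KVM_PMU_EVENT_ALLOW;
	filter->nevents = 1;
	filter->events[0] = 0xc0;	/* example: event select 0xc0, unit mask 0 */

	r = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
	free(filter);
	return r;
}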

◆ pmc_bitmask()

static u64 pmc_bitmask ( struct kvm_pmc *  pmc)
inline static

Definition at line 58 of file pmu.h.

59 {
60  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
61 
62  return pmu->counter_bitmask[pmc->type];
63 }
Here is the caller graph for this function:

◆ pmc_is_fixed()

static bool pmc_is_fixed ( struct kvm_pmc *  pmc)
inline static

Definition at line 85 of file pmu.h.

86 {
87  return pmc->type == KVM_PMC_FIXED;
88 }
Here is the caller graph for this function:

◆ pmc_is_globally_enabled()

static bool pmc_is_globally_enabled ( struct kvm_pmc *  pmc)
inline static

Definition at line 206 of file pmu.h.

207 {
208  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
209 
210  if (!kvm_pmu_has_perf_global_ctrl(pmu))
211  return true;
212 
213  return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
214 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pmc_is_gp()

static bool pmc_is_gp ( struct kvm_pmc *  pmc)
inline static

Definition at line 80 of file pmu.h.

81 {
82  return pmc->type == KVM_PMC_GP;
83 }
Here is the caller graph for this function:

◆ pmc_read_counter()

static u64 pmc_read_counter ( struct kvm_pmc *  pmc)
inline static

Definition at line 65 of file pmu.h.

66 {
67  u64 counter, enabled, running;
68 
69  counter = pmc->counter + pmc->emulated_counter;
70 
71  if (pmc->perf_event && !pmc->is_paused)
72  counter += perf_event_read_value(pmc->perf_event,
73  &enabled, &running);
74  /* FIXME: Scaling needed? */
75  return counter & pmc_bitmask(pmc);
76 }
static u64 pmc_bitmask(struct kvm_pmc *pmc)
Definition: pmu.h:58
Here is the call graph for this function:
Here is the caller graph for this function:
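
The final AND with pmc_bitmask() wraps the value to the emulated counter width (counter_bitmask is set at vPMU refresh time from the guest-visible bit width). A stand-alone sketch of that wrap-around, assuming a hypothetical 48-bit counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bitmask = (1ull << 48) - 1;	/* counter_bitmask for a 48-bit counter */

	/* Software count plus hardware perf delta, as in pmc_read_counter(). */
	uint64_t counter = 0xffffffffff00ull;	/* close to the 48-bit limit */
	uint64_t perf_delta = 0x200;

	/* Prints 0x100: the sum overflows the 48-bit width and wraps. */
	printf("guest-visible value: 0x%llx\n",
	       (unsigned long long)((counter + perf_delta) & bitmask));
	return 0;
}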

◆ pmc_speculative_in_use()

static bool pmc_speculative_in_use ( struct kvm_pmc *  pmc)
inline static

Definition at line 128 of file pmu.h.

129 {
130  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
131 
132  if (pmc_is_fixed(pmc))
133  return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
134  pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
135 
136  return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
137 }
static bool pmc_is_fixed(struct kvm_pmc *pmc)
Definition: pmu.h:85
#define fixed_ctrl_field(ctrl_reg, idx)
Definition: pmu.h:15
Here is the call graph for this function:
Here is the caller graph for this function:

◆ pmc_write_counter()

void pmc_write_counter ( struct kvm_pmc *  pmc,
u64  val 
)

Definition at line 303 of file pmu.c.

304 {
305  /*
306  * Drop any unconsumed accumulated counts, the WRMSR is a write, not a
307  * read-modify-write. Adjust the counter value so that its value is
308  * relative to the current count, as reading the current count from
309  * perf is faster than pausing and repgrogramming the event in order to
310  * reset it to '0'. Note, this very sneakily offsets the accumulated
311  * emulated count too, by using pmc_read_counter()!
312  */
313  pmc->emulated_counter = 0;
314  pmc->counter += val - pmc_read_counter(pmc);
315  pmc->counter &= pmc_bitmask(pmc);
316  pmc_update_sample_period(pmc);
317 }
static void pmc_update_sample_period(struct kvm_pmc *pmc)
Definition: pmu.c:293
Here is the call graph for this function:
Here is the caller graph for this function:

◆ reprogram_counters()

static void reprogram_counters ( struct kvm_pmu *  pmu,
u64  diff 
)
inline static

Definition at line 189 of file pmu.h.

190 {
191  int bit;
192 
193  if (!diff)
194  return;
195 
196  for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
197  set_bit(bit, pmu->reprogram_pmi);
198  kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
199 }
#define pmu_to_vcpu(pmu)
Definition: pmu.h:8
Here is the caller graph for this function:
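
The diff argument is expected to be the XOR of the old and new enable bitmaps (see the MSR_CORE_PERF_GLOBAL_CTRL case in kvm_pmu_set_msr()), so only counters whose state toggled are marked for reprogramming. A stand-alone sketch of that computation with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical old and new IA32_PERF_GLOBAL_CTRL values. */
	uint64_t old_ctrl = 0x0000000700000003ull;	/* PMC0-1, FIXED0-2 */
	uint64_t new_ctrl = 0x0000000300000005ull;	/* PMC0, PMC2, FIXED0-1 */
	uint64_t diff = old_ctrl ^ new_ctrl;

	/* Every set bit in diff is a counter whose enable state toggled. */
	while (diff) {
		int bit = __builtin_ctzll(diff);

		diff &= diff - 1;	/* clear the lowest set bit */
		printf("counter index %d needs reprogramming\n", bit);
	}
	return 0;
}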

Variable Documentation

◆ amd_pmu_ops

struct kvm_pmu_ops amd_pmu_ops
extern

◆ intel_pmu_ops

struct kvm_pmu_ops intel_pmu_ops
extern

◆ kvm_pmu_cap

struct x86_pmu_capability kvm_pmu_cap
extern

Definition at line 29 of file pmu.c.