KVM
Macros | Enumerations | Functions | Variables
pmu_intel.c File Reference
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"
Include dependency graph for pmu_intel.c:

Go to the source code of this file.

Macros

#define pr_fmt(fmt)   KBUILD_MODNAME ": " fmt
 
#define MSR_PMC_FULL_WIDTH_BIT   (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 

Enumerations

enum  intel_pmu_architectural_events {
  INTEL_ARCH_CPU_CYCLES , INTEL_ARCH_INSTRUCTIONS_RETIRED , INTEL_ARCH_REFERENCE_CYCLES , INTEL_ARCH_LLC_REFERENCES ,
  INTEL_ARCH_LLC_MISSES , INTEL_ARCH_BRANCHES_RETIRED , INTEL_ARCH_BRANCHES_MISPREDICTED , NR_REAL_INTEL_ARCH_EVENTS ,
  PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS , NR_INTEL_ARCH_EVENTS
}
 

Functions

static void reprogram_fixed_counters (struct kvm_pmu *pmu, u64 data)
 
static struct kvm_pmc * intel_pmc_idx_to_pmc (struct kvm_pmu *pmu, int pmc_idx)
 
static bool intel_hw_event_available (struct kvm_pmc *pmc)
 
static bool intel_is_valid_rdpmc_ecx (struct kvm_vcpu *vcpu, unsigned int idx)
 
static struct kvm_pmc * intel_rdpmc_ecx_to_pmc (struct kvm_vcpu *vcpu, unsigned int idx, u64 *mask)
 
static u64 vcpu_get_perf_capabilities (struct kvm_vcpu *vcpu)
 
static bool fw_writes_is_enabled (struct kvm_vcpu *vcpu)
 
static struct kvm_pmc * get_fw_gp_pmc (struct kvm_pmu *pmu, u32 msr)
 
static bool intel_pmu_is_valid_lbr_msr (struct kvm_vcpu *vcpu, u32 index)
 
static bool intel_is_valid_msr (struct kvm_vcpu *vcpu, u32 msr)
 
static struct kvm_pmc * intel_msr_idx_to_pmc (struct kvm_vcpu *vcpu, u32 msr)
 
static void intel_pmu_release_guest_lbr_event (struct kvm_vcpu *vcpu)
 
int intel_pmu_create_guest_lbr_event (struct kvm_vcpu *vcpu)
 
static bool intel_pmu_handle_lbr_msrs_access (struct kvm_vcpu *vcpu, struct msr_data *msr_info, bool read)
 
static int intel_pmu_get_msr (struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
static int intel_pmu_set_msr (struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
static void setup_fixed_pmc_eventsel (struct kvm_pmu *pmu)
 
static void intel_pmu_refresh (struct kvm_vcpu *vcpu)
 
static void intel_pmu_init (struct kvm_vcpu *vcpu)
 
static void intel_pmu_reset (struct kvm_vcpu *vcpu)
 
static void intel_pmu_legacy_freezing_lbrs_on_pmi (struct kvm_vcpu *vcpu)
 
static void intel_pmu_deliver_pmi (struct kvm_vcpu *vcpu)
 
static void vmx_update_intercept_for_lbr_msrs (struct kvm_vcpu *vcpu, bool set)
 
static void vmx_disable_lbr_msrs_passthrough (struct kvm_vcpu *vcpu)
 
static void vmx_enable_lbr_msrs_passthrough (struct kvm_vcpu *vcpu)
 
void vmx_passthrough_lbr_msrs (struct kvm_vcpu *vcpu)
 
static void intel_pmu_cleanup (struct kvm_vcpu *vcpu)
 
void intel_pmu_cross_mapped_check (struct kvm_pmu *pmu)
 

Variables

struct {
   u8   eventsel
 
   u8   unit_mask
 
} intel_arch_events []
 
static int fixed_pmc_events []
 
struct kvm_pmu_ops intel_pmu_ops __initdata
 

Macro Definition Documentation

◆ MSR_PMC_FULL_WIDTH_BIT

#define MSR_PMC_FULL_WIDTH_BIT   (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)

Definition at line 23 of file pmu_intel.c.
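
This macro is the MSR-address offset between the full-width counter aliases (MSR_IA32_PMC0, 0x4c1) and the legacy aliases (MSR_IA32_PERFCTR0, 0xc1). The following standalone sketch (illustrative, not part of pmu_intel.c; the MSR addresses are the architectural values) shows the semantics the bit distinguishes: writes through a legacy alias carry only 32 significant bits and are sign-extended, which is the same distinction intel_pmu_set_msr() makes for counter writes.

#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_PERFCTR0       0xc1
#define MSR_IA32_PMC0           0x4c1
#define MSR_PMC_FULL_WIDTH_BIT  (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)   /* 0x400 */

int main(void)
{
        uint32_t msr  = MSR_IA32_PERFCTR0;   /* legacy alias of GP counter 0 */
        uint64_t data = 0xffffffffULL;       /* guest writes "-1" as a 32-bit value */

        /* Legacy aliases only take 32 significant bits; sign-extend them. */
        if (!(msr & MSR_PMC_FULL_WIDTH_BIT))
                data = (uint64_t)(int64_t)(int32_t)data;

        printf("effective counter value: 0x%llx\n", (unsigned long long)data);
        return 0;
}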

◆ pr_fmt

#define pr_fmt (   fmt)    KBUILD_MODNAME ": " fmt

Definition at line 11 of file pmu_intel.c.

Enumeration Type Documentation

◆ intel_pmu_architectural_events

Enumerator
INTEL_ARCH_CPU_CYCLES 
INTEL_ARCH_INSTRUCTIONS_RETIRED 
INTEL_ARCH_REFERENCE_CYCLES 
INTEL_ARCH_LLC_REFERENCES 
INTEL_ARCH_LLC_MISSES 
INTEL_ARCH_BRANCHES_RETIRED 
INTEL_ARCH_BRANCHES_MISPREDICTED 
NR_REAL_INTEL_ARCH_EVENTS 
PSEUDO_ARCH_REFERENCE_CYCLES 
NR_INTEL_ARCH_EVENTS 

Definition at line 25 of file pmu_intel.c.

25 enum intel_pmu_architectural_events {
26  /*
27  * The order of the architectural events matters as support for each
28  * event is enumerated via CPUID using the index of the event.
29  */
30  INTEL_ARCH_CPU_CYCLES,
31  INTEL_ARCH_INSTRUCTIONS_RETIRED,
32  INTEL_ARCH_REFERENCE_CYCLES,
33  INTEL_ARCH_LLC_REFERENCES,
34  INTEL_ARCH_LLC_MISSES,
35  INTEL_ARCH_BRANCHES_RETIRED,
36  INTEL_ARCH_BRANCHES_MISPREDICTED,
37 
38  NR_REAL_INTEL_ARCH_EVENTS,
39 
40  /*
41  * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
42  * TSC reference cycles. The architectural reference cycles event may
43  * or may not actually use the TSC as the reference, e.g. might use the
44  * core crystal clock or the bus clock (yeah, "architectural").
45  */
46  PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
47  NR_INTEL_ARCH_EVENTS,
48 };
@ INTEL_ARCH_INSTRUCTIONS_RETIRED
Definition: pmu_intel.c:31
@ INTEL_ARCH_REFERENCE_CYCLES
Definition: pmu_intel.c:32
@ INTEL_ARCH_LLC_REFERENCES
Definition: pmu_intel.c:33
@ NR_INTEL_ARCH_EVENTS
Definition: pmu_intel.c:47
@ INTEL_ARCH_CPU_CYCLES
Definition: pmu_intel.c:30
@ INTEL_ARCH_LLC_MISSES
Definition: pmu_intel.c:34
@ INTEL_ARCH_BRANCHES_MISPREDICTED
Definition: pmu_intel.c:36
@ PSEUDO_ARCH_REFERENCE_CYCLES
Definition: pmu_intel.c:46
@ INTEL_ARCH_BRANCHES_RETIRED
Definition: pmu_intel.c:35
@ NR_REAL_INTEL_ARCH_EVENTS
Definition: pmu_intel.c:38

Function Documentation

◆ fw_writes_is_enabled()

static bool fw_writes_is_enabled ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 169 of file pmu_intel.c.

170 {
171  return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
172 }
#define PMU_CAP_FW_WRITES
Definition: capabilities.h:24
static u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:161
Here is the call graph for this function:
Here is the caller graph for this function:

◆ get_fw_gp_pmc()

static struct kvm_pmc* get_fw_gp_pmc ( struct kvm_pmu *  pmu,
u32  msr 
)
inline static

Definition at line 174 of file pmu_intel.c.

175 {
176  if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
177  return NULL;
178 
179  return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
180 }
static struct kvm_pmc * get_gp_pmc(struct kvm_pmu *pmu, u32 msr, u32 base)
Definition: pmu.h:100
#define pmu_to_vcpu(pmu)
Definition: pmu.h:8
static bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:169
Here is the call graph for this function:
Here is the caller graph for this function:

◆ intel_hw_event_available()

static bool intel_hw_event_available ( struct kvm_pmc *  pmc)
static

Definition at line 104 of file pmu_intel.c.

105 {
106  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
107  u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
108  u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
109  int i;
110 
111  BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
112 
113  /*
114  * Disallow events reported as unavailable in guest CPUID. Note, this
115  * doesn't apply to pseudo-architectural events.
116  */
117  for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) {
118  if (intel_arch_events[i].eventsel != event_select ||
119  intel_arch_events[i].unit_mask != unit_mask)
120  continue;
121 
122  return pmu->available_event_types & BIT(i);
123  }
124 
125  return true;
126 }
#define pmc_to_pmu(pmc)
Definition: pmu.h:9
u8 unit_mask
Definition: pmu_intel.c:52
u8 eventsel
Definition: pmu_intel.c:51
static struct @34 intel_arch_events[]
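
For reference, a short standalone example (not KVM code) of the field extraction performed above: bits 7:0 of the event-select MSR hold the event code and bits 15:8 the unit mask, so a guest value of 0x412e matches the { 0x2e, 0x41 } LLC-misses entry in intel_arch_events[].

#include <stdint.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_EVENT  0x000000ffULL
#define ARCH_PERFMON_EVENTSEL_UMASK  0x0000ff00ULL

int main(void)
{
        uint64_t eventsel = 0x412e;   /* event 0x2e, umask 0x41: LLC misses */

        uint8_t event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        uint8_t unit_mask    = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        printf("event=0x%02x umask=0x%02x\n", event_select, unit_mask);
        return 0;
}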

◆ intel_is_valid_msr()

static bool intel_is_valid_msr ( struct kvm_vcpu *  vcpu,
u32  msr 
)
static

Definition at line 200 of file pmu_intel.c.

201 {
202  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
203  u64 perf_capabilities;
204  int ret;
205 
206  switch (msr) {
207  case MSR_CORE_PERF_FIXED_CTR_CTRL:
208  return kvm_pmu_has_perf_global_ctrl(pmu);
209  case MSR_IA32_PEBS_ENABLE:
210  ret = vcpu_get_perf_capabilities(vcpu) & PERF_CAP_PEBS_FORMAT;
211  break;
212  case MSR_IA32_DS_AREA:
213  ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
214  break;
215  case MSR_PEBS_DATA_CFG:
216  perf_capabilities = vcpu_get_perf_capabilities(vcpu);
217  ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
218  ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
219  break;
220  default:
221  ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
222  get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
223  get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
224  intel_pmu_is_valid_lbr_msr(vcpu, msr);
225  break;
226  }
227 
228  return ret;
229 }
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:83
#define vcpu_to_pmu(vcpu)
Definition: pmu.h:7
static bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
Definition: pmu.h:44
static struct kvm_pmc * get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
Definition: pmu.h:114
static struct kvm_pmc * get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
Definition: pmu_intel.c:174
static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
Definition: pmu_intel.c:182
Here is the call graph for this function:
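
The PEBS-related cases above are gated on fields of the guest's IA32_PERF_CAPABILITIES value returned by vcpu_get_perf_capabilities(). A hedged standalone sketch of the relevant bit layout, assuming the SDM-documented positions (LBR format in bits 5:0, PEBS record format in bits 11:8, full-width writes in bit 13, PEBS baseline in bit 14):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMU_CAP_LBR_FMT         0x3fULL
#define PERF_CAP_PEBS_FORMAT    0xf00ULL
#define PMU_CAP_FW_WRITES       (1ULL << 13)
#define PERF_CAP_PEBS_BASELINE  (1ULL << 14)

int main(void)
{
        uint64_t caps = 0x6505;   /* hypothetical guest IA32_PERF_CAPABILITIES */

        bool fw_writes        = caps & PMU_CAP_FW_WRITES;
        bool pebs_baseline    = caps & PERF_CAP_PEBS_BASELINE;
        unsigned int pebs_fmt = (caps & PERF_CAP_PEBS_FORMAT) >> 8;
        unsigned int lbr_fmt  = caps & PMU_CAP_LBR_FMT;

        printf("lbr_fmt=%u fw_writes=%d pebs_baseline=%d pebs_format=%u\n",
               lbr_fmt, fw_writes, pebs_baseline, pebs_fmt);
        return 0;
}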

◆ intel_is_valid_rdpmc_ecx()

static bool intel_is_valid_rdpmc_ecx ( struct kvm_vcpu *  vcpu,
unsigned int  idx 
)
static

Definition at line 128 of file pmu_intel.c.

129 {
130  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
131  bool fixed = idx & (1u << 30);
132 
133  idx &= ~(3u << 30);
134 
135  return fixed ? idx < pmu->nr_arch_fixed_counters
136  : idx < pmu->nr_arch_gp_counters;
137 }
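
A standalone illustration of the RDPMC index encoding decoded here and in intel_rdpmc_ecx_to_pmc(): bit 30 of ECX selects the fixed-counter namespace and the low bits give the counter index within it.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int idx = 0x40000001u;   /* RDPMC with ECX selecting fixed counter 1 */
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);               /* strip the type bits, keep the index */
        printf("%s counter %u\n", fixed ? "fixed" : "gp", idx);
        return 0;
}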

◆ intel_msr_idx_to_pmc()

static struct kvm_pmc* intel_msr_idx_to_pmc ( struct kvm_vcpu *  vcpu,
u32  msr 
)
static

Definition at line 231 of file pmu_intel.c.

232 {
233  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
234  struct kvm_pmc *pmc;
235 
236  pmc = get_fixed_pmc(pmu, msr);
237  pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
238  pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
239 
240  return pmc;
241 }
Here is the call graph for this function:

◆ intel_pmc_idx_to_pmc()

static struct kvm_pmc* intel_pmc_idx_to_pmc ( struct kvm_pmu *  pmu,
int  pmc_idx 
)
static

Definition at line 92 of file pmu_intel.c.

93 {
94  if (pmc_idx < INTEL_PMC_IDX_FIXED) {
95  return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
96  MSR_P6_EVNTSEL0);
97  } else {
98  u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
99 
100  return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
101  }
102 }
Here is the call graph for this function:

◆ intel_pmu_cleanup()

static void intel_pmu_cleanup ( struct kvm_vcpu *  vcpu)
static

Definition at line 740 of file pmu_intel.c.

741 {
742  if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
743  intel_pmu_release_guest_lbr_event(vcpu);
744 }
static void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:243
static __always_inline u64 vmcs_read64(unsigned long field)
Definition: vmx_ops.h:169
Here is the call graph for this function:

◆ intel_pmu_create_guest_lbr_event()

int intel_pmu_create_guest_lbr_event ( struct kvm_vcpu *  vcpu)

Definition at line 254 of file pmu_intel.c.

255 {
256  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
257  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
258  struct perf_event *event;
259 
260  /*
261  * The perf_event_attr is constructed in the minimum efficient way:
262  * - set 'pinned = true' to make it task pinned so that if another
263  * cpu pinned event reclaims LBR, the event->oncpu will be set to -1;
264  * - set '.exclude_host = true' to record guest branches behavior;
265  *
266  * - set '.config = INTEL_FIXED_VLBR_EVENT' to indicates host perf
267  * schedule the event without a real HW counter but a fake one;
268  * check is_guest_lbr_event() and __intel_get_event_constraints();
269  *
270  * - set 'sample_type = PERF_SAMPLE_BRANCH_STACK' and
271  * 'branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
272  * PERF_SAMPLE_BRANCH_USER' to configure it as a LBR callstack
273  * event, which helps KVM to save/restore guest LBR records
274  * during host context switches and reduces quite a lot overhead,
275  * check branch_user_callstack() and intel_pmu_lbr_sched_task();
276  */
277  struct perf_event_attr attr = {
278  .type = PERF_TYPE_RAW,
279  .size = sizeof(attr),
280  .config = INTEL_FIXED_VLBR_EVENT,
281  .sample_type = PERF_SAMPLE_BRANCH_STACK,
282  .pinned = true,
283  .exclude_host = true,
284  .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
285  PERF_SAMPLE_BRANCH_USER,
286  };
287 
288  if (unlikely(lbr_desc->event)) {
289  __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
290  return 0;
291  }
292 
293  event = perf_event_create_kernel_counter(&attr, -1,
294  current, NULL, NULL);
295  if (IS_ERR(event)) {
296  pr_debug_ratelimited("%s: failed %ld\n",
297  __func__, PTR_ERR(event));
298  return PTR_ERR(event);
299  }
300  lbr_desc->event = event;
301  pmu->event_count++;
302  __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
303  return 0;
304 }
Definition: vmx.h:96
struct perf_event * event
Definition: vmx.h:106
static struct lbr_desc * vcpu_to_lbr_desc(struct kvm_vcpu *vcpu)
Definition: vmx.h:662
Here is the call graph for this function:
Here is the caller graph for this function:

◆ intel_pmu_cross_mapped_check()

void intel_pmu_cross_mapped_check ( struct kvm_pmu *  pmu)

Definition at line 746 of file pmu_intel.c.

747 {
748  struct kvm_pmc *pmc = NULL;
749  int bit, hw_idx;
750 
751  for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
752  X86_PMC_IDX_MAX) {
753  pmc = intel_pmc_idx_to_pmc(pmu, bit);
754 
755  if (!pmc || !pmc_speculative_in_use(pmc) ||
756  !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
757  continue;
758 
759  /*
760  * A negative index indicates the event isn't mapped to a
761  * physical counter in the host, e.g. due to contention.
762  */
763  hw_idx = pmc->perf_event->hw.idx;
764  if (hw_idx != pmc->idx && hw_idx > -1)
765  pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
766  }
767 }
static bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
Definition: pmu.h:206
static bool pmc_speculative_in_use(struct kvm_pmc *pmc)
Definition: pmu.h:128
static struct kvm_pmc * intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
Definition: pmu_intel.c:92
Here is the caller graph for this function:

◆ intel_pmu_deliver_pmi()

static void intel_pmu_deliver_pmi ( struct kvm_vcpu *  vcpu)
static

Definition at line 654 of file pmu_intel.c.

655 {
656  u8 version = vcpu_to_pmu(vcpu)->version;
657 
658  if (!intel_pmu_lbr_is_enabled(vcpu))
659  return;
660 
661  if (version > 1 && version < 4)
662  intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
663 }
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:644
static bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu)
Definition: vmx.h:672
Here is the call graph for this function:

◆ intel_pmu_get_msr()

static int intel_pmu_get_msr ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info 
)
static

Definition at line 348 of file pmu_intel.c.

349 {
350  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
351  struct kvm_pmc *pmc;
352  u32 msr = msr_info->index;
353 
354  switch (msr) {
355  case MSR_CORE_PERF_FIXED_CTR_CTRL:
356  msr_info->data = pmu->fixed_ctr_ctrl;
357  break;
358  case MSR_IA32_PEBS_ENABLE:
359  msr_info->data = pmu->pebs_enable;
360  break;
361  case MSR_IA32_DS_AREA:
362  msr_info->data = pmu->ds_area;
363  break;
364  case MSR_PEBS_DATA_CFG:
365  msr_info->data = pmu->pebs_data_cfg;
366  break;
367  default:
368  if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
369  (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
370  u64 val = pmc_read_counter(pmc);
371  msr_info->data =
372  val & pmu->counter_bitmask[KVM_PMC_GP];
373  break;
374  } else if ((pmc = get_fixed_pmc(pmu, msr))) {
375  u64 val = pmc_read_counter(pmc);
376  msr_info->data =
377  val & pmu->counter_bitmask[KVM_PMC_FIXED];
378  break;
379  } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
380  msr_info->data = pmc->eventsel;
381  break;
382  } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, true)) {
383  break;
384  }
385  return 1;
386  }
387 
388  return 0;
389 }
static u64 pmc_read_counter(struct kvm_pmc *pmc)
Definition: pmu.h:65
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu, struct msr_data *msr_info, bool read)
Definition: pmu_intel.c:311
Here is the call graph for this function:

◆ intel_pmu_handle_lbr_msrs_access()

static bool intel_pmu_handle_lbr_msrs_access ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info,
bool  read 
)
static

Definition at line 311 of file pmu_intel.c.

313 {
314  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
315  u32 index = msr_info->index;
316 
317  if (!intel_pmu_is_valid_lbr_msr(vcpu, index))
318  return false;
319 
320  if (!lbr_desc->event && intel_pmu_create_guest_lbr_event(vcpu) < 0)
321  goto dummy;
322 
323  /*
324  * Disable irq to ensure the LBR feature doesn't get reclaimed by the
325  * host at the time the value is read from the msr, and this avoids the
326  * host LBR value to be leaked to the guest. If LBR has been reclaimed,
327  * return 0 on guest reads.
328  */
329  local_irq_disable();
330  if (lbr_desc->event->state == PERF_EVENT_STATE_ACTIVE) {
331  if (read)
332  rdmsrl(index, msr_info->data);
333  else
334  wrmsrl(index, msr_info->data);
335  __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
336  local_irq_enable();
337  return true;
338  }
339  clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
340  local_irq_enable();
341 
342 dummy:
343  if (read)
344  msr_info->data = 0;
345  return true;
346 }
int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:254
Here is the call graph for this function:
Here is the caller graph for this function:

◆ intel_pmu_init()

static void intel_pmu_init ( struct kvm_vcpu *  vcpu)
static

Definition at line 606 of file pmu_intel.c.

607 {
608  int i;
609  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
610  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
611 
612  for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
613  pmu->gp_counters[i].type = KVM_PMC_GP;
614  pmu->gp_counters[i].vcpu = vcpu;
615  pmu->gp_counters[i].idx = i;
616  pmu->gp_counters[i].current_config = 0;
617  }
618 
619  for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
620  pmu->fixed_counters[i].type = KVM_PMC_FIXED;
621  pmu->fixed_counters[i].vcpu = vcpu;
622  pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
623  pmu->fixed_counters[i].current_config = 0;
624  }
625 
626  lbr_desc->records.nr = 0;
627  lbr_desc->event = NULL;
628  lbr_desc->msr_passthrough = false;
629 }
struct x86_pmu_lbr records
Definition: vmx.h:98
bool msr_passthrough
Definition: vmx.h:109
Here is the call graph for this function:

◆ intel_pmu_is_valid_lbr_msr()

static bool intel_pmu_is_valid_lbr_msr ( struct kvm_vcpu *  vcpu,
u32  index 
)
static

Definition at line 182 of file pmu_intel.c.

183 {
184  struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
185  bool ret = false;
186 
187  if (!intel_pmu_lbr_is_enabled(vcpu))
188  return ret;
189 
190  ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
191  (index >= records->from && index < records->from + records->nr) ||
192  (index >= records->to && index < records->to + records->nr);
193 
194  if (!ret && records->info)
195  ret = (index >= records->info && index < records->info + records->nr);
196 
197  return ret;
198 }
static struct x86_pmu_lbr * vcpu_to_lbr_records(struct kvm_vcpu *vcpu)
Definition: vmx.h:667
Here is the call graph for this function:
Here is the caller graph for this function:

◆ intel_pmu_legacy_freezing_lbrs_on_pmi()

static void intel_pmu_legacy_freezing_lbrs_on_pmi ( struct kvm_vcpu *  vcpu)
static

Definition at line 644 of file pmu_intel.c.

645 {
646  u64 data = vmcs_read64(GUEST_IA32_DEBUGCTL);
647 
648  if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
649  data &= ~DEBUGCTLMSR_LBR;
650  vmcs_write64(GUEST_IA32_DEBUGCTL, data);
651  }
652 }
static __always_inline void vmcs_write64(unsigned long field, u64 value)
Definition: vmx_ops.h:246
Here is the call graph for this function:
Here is the caller graph for this function:
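
A standalone sketch of the IA32_DEBUGCTL bits this emulates (bit 0 enables LBR recording, bit 11 requests that LBRs be frozen when a PMI is raised): on PMU versions 2 and 3 the freeze is performed by clearing the LBR enable bit, which is what the function above does to the guest's DEBUGCTL.

#include <stdint.h>
#include <stdio.h>

#define DEBUGCTLMSR_LBR                 (1ULL << 0)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI  (1ULL << 11)

int main(void)
{
        uint64_t debugctl = DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;

        /* What PMI delivery emulates for the guest. */
        if (debugctl & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI)
                debugctl &= ~DEBUGCTLMSR_LBR;

        printf("DEBUGCTL after PMI: 0x%llx\n", (unsigned long long)debugctl);
        return 0;
}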

◆ intel_pmu_refresh()

static void intel_pmu_refresh ( struct kvm_vcpu *  vcpu)
static

Definition at line 483 of file pmu_intel.c.

484 {
485  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
486  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
487  struct kvm_cpuid_entry2 *entry;
488  union cpuid10_eax eax;
489  union cpuid10_edx edx;
490  u64 perf_capabilities;
491  u64 counter_mask;
492  int i;
493 
494  pmu->nr_arch_gp_counters = 0;
495  pmu->nr_arch_fixed_counters = 0;
496  pmu->counter_bitmask[KVM_PMC_GP] = 0;
497  pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
498  pmu->version = 0;
499  pmu->reserved_bits = 0xffffffff00200000ull;
500  pmu->raw_event_mask = X86_RAW_EVENT_MASK;
501  pmu->global_ctrl_mask = ~0ull;
502  pmu->global_status_mask = ~0ull;
503  pmu->fixed_ctr_ctrl_mask = ~0ull;
504  pmu->pebs_enable_mask = ~0ull;
505  pmu->pebs_data_cfg_mask = ~0ull;
506 
507  memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
508 
509  /*
510  * Setting passthrough of LBR MSRs is done only in the VM-Entry loop,
511  * and PMU refresh is disallowed after the vCPU has run, i.e. this code
512  * should never be reached while KVM is passing through MSRs.
513  */
514  if (KVM_BUG_ON(lbr_desc->msr_passthrough, vcpu->kvm))
515  return;
516 
517  entry = kvm_find_cpuid_entry(vcpu, 0xa);
518  if (!entry || !vcpu->kvm->arch.enable_pmu)
519  return;
520  eax.full = entry->eax;
521  edx.full = entry->edx;
522 
523  pmu->version = eax.split.version_id;
524  if (!pmu->version)
525  return;
526 
527  pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
528  kvm_pmu_cap.num_counters_gp);
529  eax.split.bit_width = min_t(int, eax.split.bit_width,
530  kvm_pmu_cap.bit_width_gp);
531  pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
532  eax.split.mask_length = min_t(int, eax.split.mask_length,
533  kvm_pmu_cap.events_mask_len);
534  pmu->available_event_types = ~entry->ebx &
535  ((1ull << eax.split.mask_length) - 1);
536 
537  if (pmu->version == 1) {
538  pmu->nr_arch_fixed_counters = 0;
539  } else {
540  pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
541  kvm_pmu_cap.num_counters_fixed);
542  edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
543  kvm_pmu_cap.bit_width_fixed);
544  pmu->counter_bitmask[KVM_PMC_FIXED] =
545  ((u64)1 << edx.split.bit_width_fixed) - 1;
546  setup_fixed_pmc_eventsel(pmu);
547  }
548 
549  for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
550  pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
551  counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
552  (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
553  pmu->global_ctrl_mask = counter_mask;
554 
555  /*
556  * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
557  * share reserved bit definitions. The kernel just happens to use
558  * OVF_CTRL for the names.
559  */
560  pmu->global_status_mask = pmu->global_ctrl_mask
561  & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
562  MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
563  if (vmx_pt_mode_is_host_guest())
564  pmu->global_status_mask &=
565  ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
566 
567  entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
568  if (entry &&
569  (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
570  (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
571  pmu->reserved_bits ^= HSW_IN_TX;
572  pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
573  }
574 
575  bitmap_set(pmu->all_valid_pmc_idx,
576  0, pmu->nr_arch_gp_counters);
577  bitmap_set(pmu->all_valid_pmc_idx,
578  INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
579 
580  perf_capabilities = vcpu_get_perf_capabilities(vcpu);
581  if (cpuid_model_is_consistent(vcpu) &&
582  (perf_capabilities & PMU_CAP_LBR_FMT))
583  x86_perf_get_lbr(&lbr_desc->records);
584  else
585  lbr_desc->records.nr = 0;
586 
587  if (lbr_desc->records.nr)
588  bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
589 
590  if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
591  if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
592  pmu->pebs_enable_mask = counter_mask;
593  pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
594  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
595  pmu->fixed_ctr_ctrl_mask &=
596  ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
597  }
598  pmu->pebs_data_cfg_mask = ~0xff00000full;
599  } else {
600  pmu->pebs_enable_mask =
601  ~((1ull << pmu->nr_arch_gp_counters) - 1);
602  }
603  }
604 }
static bool vmx_pt_mode_is_host_guest(void)
Definition: capabilities.h:388
#define PMU_CAP_LBR_FMT
Definition: capabilities.h:25
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, u32 function, u32 index)
Definition: cpuid.c:1447
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
Definition: cpuid.c:1455
static bool cpuid_model_is_consistent(struct kvm_vcpu *vcpu)
Definition: cpuid.h:155
struct x86_pmu_capability __read_mostly kvm_pmu_cap
Definition: pmu.c:29
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
Definition: pmu_intel.c:467
Here is the call graph for this function:
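
A standalone worked example (hypothetical CPUID values) of the leaf 0xA decoding performed above: EAX[7:0] is the PMU version, EAX[15:8] the number of GP counters, EAX[23:16] the GP counter bit width, and the per-counter value mask is (1 << width) - 1.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t eax = 0x08300805;   /* hypothetical CPUID.0AH:EAX: version 5, 8 GP counters, 48 bits */

        unsigned int version   = eax & 0xff;
        unsigned int nr_gp     = (eax >> 8) & 0xff;
        unsigned int bit_width = (eax >> 16) & 0xff;
        uint64_t counter_mask  = ((uint64_t)1 << bit_width) - 1;

        printf("version=%u gp_counters=%u counter_bitmask=0x%llx\n",
               version, nr_gp, (unsigned long long)counter_mask);
        return 0;
}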

◆ intel_pmu_release_guest_lbr_event()

static void intel_pmu_release_guest_lbr_event ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 243 of file pmu_intel.c.

244 {
245  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
246 
247  if (lbr_desc->event) {
248  perf_event_release_kernel(lbr_desc->event);
249  lbr_desc->event = NULL;
250  vcpu_to_pmu(vcpu)->event_count--;
251  }
252 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ intel_pmu_reset()

static void intel_pmu_reset ( struct kvm_vcpu *  vcpu)
static

Definition at line 631 of file pmu_intel.c.

632 {
633  intel_pmu_release_guest_lbr_event(vcpu);
634 }
Here is the call graph for this function:

◆ intel_pmu_set_msr()

static int intel_pmu_set_msr ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info 
)
static

Definition at line 391 of file pmu_intel.c.

392 {
393  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
394  struct kvm_pmc *pmc;
395  u32 msr = msr_info->index;
396  u64 data = msr_info->data;
397  u64 reserved_bits, diff;
398 
399  switch (msr) {
400  case MSR_CORE_PERF_FIXED_CTR_CTRL:
401  if (data & pmu->fixed_ctr_ctrl_mask)
402  return 1;
403 
404  if (pmu->fixed_ctr_ctrl != data)
405  reprogram_fixed_counters(pmu, data);
406  break;
407  case MSR_IA32_PEBS_ENABLE:
408  if (data & pmu->pebs_enable_mask)
409  return 1;
410 
411  if (pmu->pebs_enable != data) {
412  diff = pmu->pebs_enable ^ data;
413  pmu->pebs_enable = data;
414  reprogram_counters(pmu, diff);
415  }
416  break;
417  case MSR_IA32_DS_AREA:
418  if (is_noncanonical_address(data, vcpu))
419  return 1;
420 
421  pmu->ds_area = data;
422  break;
423  case MSR_PEBS_DATA_CFG:
424  if (data & pmu->pebs_data_cfg_mask)
425  return 1;
426 
427  pmu->pebs_data_cfg = data;
428  break;
429  default:
430  if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
431  (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
432  if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
433  (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
434  return 1;
435 
436  if (!msr_info->host_initiated &&
437  !(msr & MSR_PMC_FULL_WIDTH_BIT))
438  data = (s64)(s32)data;
439  pmc_write_counter(pmc, data);
440  break;
441  } else if ((pmc = get_fixed_pmc(pmu, msr))) {
442  pmc_write_counter(pmc, data);
443  break;
444  } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
445  reserved_bits = pmu->reserved_bits;
446  if ((pmc->idx == 2) &&
447  (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
448  reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
449  if (data & reserved_bits)
450  return 1;
451 
452  if (data != pmc->eventsel) {
453  pmc->eventsel = data;
454  kvm_pmu_request_counter_reprogram(pmc);
455  }
456  break;
457  } else if (intel_pmu_handle_lbr_msrs_access(vcpu, msr_info, false)) {
458  break;
459  }
460  /* Not a known PMU MSR. */
461  return 1;
462  }
463 
464  return 0;
465 }
void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
Definition: pmu.c:303
static void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
Definition: pmu.h:183
static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
Definition: pmu.h:189
#define MSR_PMC_FULL_WIDTH_BIT
Definition: pmu_intel.c:23
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
Definition: pmu_intel.c:71
static bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
Definition: x86.h:213
Here is the call graph for this function:

◆ intel_rdpmc_ecx_to_pmc()

static struct kvm_pmc* intel_rdpmc_ecx_to_pmc ( struct kvm_vcpu *  vcpu,
unsigned int  idx,
u64 *  mask 
)
static

Definition at line 139 of file pmu_intel.c.

141 {
142  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
143  bool fixed = idx & (1u << 30);
144  struct kvm_pmc *counters;
145  unsigned int num_counters;
146 
147  idx &= ~(3u << 30);
148  if (fixed) {
149  counters = pmu->fixed_counters;
150  num_counters = pmu->nr_arch_fixed_counters;
151  } else {
152  counters = pmu->gp_counters;
153  num_counters = pmu->nr_arch_gp_counters;
154  }
155  if (idx >= num_counters)
156  return NULL;
157  *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
158  return &counters[array_index_nospec(idx, num_counters)];
159 }

◆ reprogram_fixed_counters()

static void reprogram_fixed_counters ( struct kvm_pmu *  pmu,
u64  data 
)
static

Definition at line 71 of file pmu_intel.c.

72 {
73  struct kvm_pmc *pmc;
74  u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
75  int i;
76 
77  pmu->fixed_ctr_ctrl = data;
78  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
79  u8 new_ctrl = fixed_ctrl_field(data, i);
80  u8 old_ctrl = fixed_ctrl_field(old_fixed_ctr_ctrl, i);
81 
82  if (old_ctrl == new_ctrl)
83  continue;
84 
85  pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
86 
87  __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
88  kvm_pmu_request_counter_reprogram(pmc);
89  }
90 }
#define fixed_ctrl_field(ctrl_reg, idx)
Definition: pmu.h:15
Here is the call graph for this function:
Here is the caller graph for this function:
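
A standalone sketch of the IA32_FIXED_CTR_CTRL layout that fixed_ctrl_field() extracts (the macro body below is an illustrative re-statement of the pmu.h helper, assumed here): each fixed counter owns a 4-bit field, with bit 0 enabling ring-0 counting, bit 1 ring-3 counting, and bit 3 the PMI on overflow, so counter i only needs reprogramming when its nibble changes.

#include <stdint.h>
#include <stdio.h>

/* Illustrative equivalent of the kernel's fixed_ctrl_field() helper. */
#define fixed_ctrl_field(ctrl_reg, idx)  (((ctrl_reg) >> ((idx) * 4)) & 0xf)

int main(void)
{
        uint64_t fixed_ctr_ctrl = 0xb0;   /* enable fixed counter 1 in both rings, with PMI */

        for (int i = 0; i < 3; i++)
                printf("fixed counter %d ctrl = 0x%llx\n", i,
                       (unsigned long long)fixed_ctrl_field(fixed_ctr_ctrl, i));
        return 0;
}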

◆ setup_fixed_pmc_eventsel()

static void setup_fixed_pmc_eventsel ( struct kvm_pmu *  pmu)
static

Definition at line 467 of file pmu_intel.c.

468 {
469  int i;
470 
471  BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);
472 
473  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
474  int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
475  struct kvm_pmc *pmc = &pmu->fixed_counters[index];
476  u32 event = fixed_pmc_events[index];
477 
478  pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
479  intel_arch_events[event].eventsel;
480  }
481 }
static int fixed_pmc_events[]
Definition: pmu_intel.c:65
Here is the caller graph for this function:

◆ vcpu_get_perf_capabilities()

static u64 vcpu_get_perf_capabilities ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 161 of file pmu_intel.c.

162 {
163  if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
164  return 0;
165 
166  return vcpu->arch.perf_capabilities;
167 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_disable_lbr_msrs_passthrough()

static void vmx_disable_lbr_msrs_passthrough ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 681 of file pmu_intel.c.

682 {
683  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
684 
685  if (!lbr_desc->msr_passthrough)
686  return;
687 
688  vmx_update_intercept_for_lbr_msrs(vcpu, true);
689  lbr_desc->msr_passthrough = false;
690 }
static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
Definition: pmu_intel.c:665
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_enable_lbr_msrs_passthrough()

static void vmx_enable_lbr_msrs_passthrough ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 692 of file pmu_intel.c.

693 {
694  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
695 
696  if (lbr_desc->msr_passthrough)
697  return;
698 
699  vmx_update_intercept_for_lbr_msrs(vcpu, false);
700  lbr_desc->msr_passthrough = true;
701 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_passthrough_lbr_msrs()

void vmx_passthrough_lbr_msrs ( struct kvm_vcpu *  vcpu)

Definition at line 713 of file pmu_intel.c.

714 {
715  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
716  struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
717 
718  if (!lbr_desc->event) {
719  vmx_disable_lbr_msrs_passthrough(vcpu);
720  if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
721  goto warn;
722  if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
723  goto warn;
724  return;
725  }
726 
727  if (lbr_desc->event->state < PERF_EVENT_STATE_ACTIVE) {
728  vmx_disable_lbr_msrs_passthrough(vcpu);
729  __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
730  goto warn;
731  } else
732  vmx_enable_lbr_msrs_passthrough(vcpu);
733 
734  return;
735 
736 warn:
737  pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
738 }
static void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:692
static void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:681
Here is the call graph for this function:
Here is the caller graph for this function:

◆ vmx_update_intercept_for_lbr_msrs()

static void vmx_update_intercept_for_lbr_msrs ( struct kvm_vcpu *  vcpu,
bool  set 
)
static

Definition at line 665 of file pmu_intel.c.

666 {
667  struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
668  int i;
669 
670  for (i = 0; i < lbr->nr; i++) {
671  vmx_set_intercept_for_msr(vcpu, lbr->from + i, MSR_TYPE_RW, set);
672  vmx_set_intercept_for_msr(vcpu, lbr->to + i, MSR_TYPE_RW, set);
673  if (lbr->info)
674  vmx_set_intercept_for_msr(vcpu, lbr->info + i, MSR_TYPE_RW, set);
675  }
676 
677  vmx_set_intercept_for_msr(vcpu, MSR_LBR_SELECT, MSR_TYPE_RW, set);
678  vmx_set_intercept_for_msr(vcpu, MSR_LBR_TOS, MSR_TYPE_RW, set);
679 }
#define MSR_TYPE_RW
Definition: vmx.h:21
static void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value)
Definition: vmx.h:427
Here is the call graph for this function:
Here is the caller graph for this function:

Variable Documentation

◆ __initdata

struct kvm_pmu_ops intel_pmu_ops __initdata
Initial value:
= {
.hw_event_available = intel_hw_event_available,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = intel_msr_idx_to_pmc,
.is_valid_rdpmc_ecx = intel_is_valid_rdpmc_ecx,
.is_valid_msr = intel_is_valid_msr,
.get_msr = intel_pmu_get_msr,
.set_msr = intel_pmu_set_msr,
.refresh = intel_pmu_refresh,
.init = intel_pmu_init,
.reset = intel_pmu_reset,
.deliver_pmi = intel_pmu_deliver_pmi,
.cleanup = intel_pmu_cleanup,
.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
.MIN_NR_GP_COUNTERS = 1,
}
static void intel_pmu_reset(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:631
static struct kvm_pmc * intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu, unsigned int idx, u64 *mask)
Definition: pmu_intel.c:139
static void intel_pmu_init(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:606
static struct kvm_pmc * intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
Definition: pmu_intel.c:231
static bool intel_hw_event_available(struct kvm_pmc *pmc)
Definition: pmu_intel.c:104
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
Definition: pmu_intel.c:200
static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Definition: pmu_intel.c:348
static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
Definition: pmu_intel.c:391
static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:740
static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:654
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
Definition: pmu_intel.c:483
static bool intel_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
Definition: pmu_intel.c:128

Definition at line 746 of file pmu_intel.c.

◆ eventsel

u8 eventsel

Definition at line 51 of file pmu_intel.c.

◆ fixed_pmc_events

int fixed_pmc_events[]
static
Initial value:

Definition at line 65 of file pmu_intel.c.

◆ intel_arch_events

struct { ... } intel_arch_events[]
Initial value:
= {
[INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 },
[INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 },
[INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 },
[INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f },
[INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 },
[INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 },
[INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 },
[PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 },
}

◆ unit_mask

u8 unit_mask

Definition at line 52 of file pmu_intel.c.