#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
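
/*
 * MSR_IA32_PMC0..n are the full-width aliases of the legacy
 * MSR_IA32_PERFCTR0..n counters; the offset between the two MSR ranges
 * is used below to spot full-width counter accesses.
 */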
#define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
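
/*
 * From reprogram_fixed_counters(): propagate a guest write to
 * MSR_CORE_PERF_FIXED_CTR_CTRL.  old_ctrl/new_ctrl are the per-counter
 * control fields (extracted with fixed_ctrl_field()); only counters
 * whose field actually changed are marked for reprogramming.
 */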
        u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;

        pmu->fixed_ctr_ctrl = data;
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                if (old_ctrl == new_ctrl)
                        continue;

                __set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
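
/*
 * From intel_pmc_idx_to_pmc(): indices below INTEL_PMC_IDX_FIXED name a
 * GP counter (looked up via its MSR_P6_EVNTSEL0-based selector); higher
 * indices are rebased into the fixed-counter array.
 */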
        if (pmc_idx < INTEL_PMC_IDX_FIXED) {
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        } else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;
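
/*
 * From intel_hw_event_available(): split the event selector into its
 * event and unit-mask fields; a loop over intel_arch_events[] (elided
 * here) finds the matching architectural event i, which is usable only
 * if the guest's CPUID 0xA event bitmap advertises it.
 */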
        u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        return pmu->available_event_types & BIT(i);
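
/*
 * From intel_is_valid_rdpmc_ecx(): bit 30 of the RDPMC index selects
 * the fixed-counter space; the remaining bits must name an existing
 * counter of that type.
 */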
        bool fixed = idx & (1u << 30);

        return fixed ? idx < pmu->nr_arch_fixed_counters
                     : idx < pmu->nr_arch_gp_counters;
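
/*
 * From intel_rdpmc_ecx_to_pmc(): resolve an RDPMC index to its backing
 * kvm_pmc, clamp the lookup with array_index_nospec() to block
 * speculative out-of-bounds reads, and report the counter-width mask.
 */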
static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                              unsigned int idx, u64 *mask)
{
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;
        unsigned int num_counters;

        if (fixed) {
                counters = pmu->fixed_counters;
                num_counters = pmu->nr_arch_fixed_counters;
        } else {
                counters = pmu->gp_counters;
                num_counters = pmu->nr_arch_gp_counters;
        }
        if (idx >= num_counters)
                return NULL;

        *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];
        return &counters[array_index_nospec(idx, num_counters)];
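
/*
 * From vcpu_get_perf_capabilities(): the guest-visible value of
 * IA32_PERF_CAPABILITIES.
 */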
        return vcpu->arch.perf_capabilities;
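
/*
 * From intel_pmu_is_valid_lbr_msr(): an MSR is an LBR MSR if it is
 * MSR_LBR_SELECT, MSR_LBR_TOS, or falls in the from/to (and, when
 * present, info) record ranges described by the vCPU's x86_pmu_lbr.
 */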
        ret = (index == MSR_LBR_SELECT) || (index == MSR_LBR_TOS) ||
                (index >= records->from && index < records->from + records->nr) ||
                (index >= records->to && index < records->to + records->nr);

        if (!ret && records->info)
                ret = (index >= records->info && index < records->info + records->nr);
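
/*
 * From intel_is_valid_msr(): per-MSR validity checks.  Only the
 * MSR_PEBS_DATA_CFG test and the head of the default arm survive in
 * this excerpt; in the full function each case label above has its own
 * capability check.
 */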
        u64 perf_capabilities;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_IA32_PEBS_ENABLE:
        case MSR_IA32_DS_AREA:
        case MSR_PEBS_DATA_CFG:
                ret = (perf_capabilities & PERF_CAP_PEBS_BASELINE) &&
                        ((perf_capabilities & PERF_CAP_PEBS_FORMAT) > 3);
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
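
/*
 * From intel_msr_idx_to_pmc(): fall back through the GP event-select
 * and counter MSR ranges when the MSR does not name a fixed counter.
 */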
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
        pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
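
/*
 * From intel_pmu_create_guest_lbr_event(): create a host perf event
 * that claims the LBR facility on the guest's behalf.  The event is
 * guest-only (.exclude_host) and uses the INTEL_FIXED_VLBR_EVENT
 * encoding so host perf schedules it without consuming a real counter.
 */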
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = PERF_TYPE_RAW,
                .size = sizeof(attr),
                .config = INTEL_FIXED_VLBR_EVENT,
                .sample_type = PERF_SAMPLE_BRANCH_STACK,
                .exclude_host = true,
                .branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
                                      PERF_SAMPLE_BRANCH_USER,
        };

        if (unlikely(lbr_desc->event)) {
                __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
                return 0;
        }

        event = perf_event_create_kernel_counter(&attr, -1,
                                                 current, NULL, NULL);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("%s: failed %ld\n",
                                     __func__, PTR_ERR(event));
                return PTR_ERR(event);
        }

        __set_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);
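
/*
 * From intel_pmu_handle_lbr_msrs_access(): while the vLBR perf event is
 * active, guest LBR MSR reads and writes go straight to hardware and
 * the virtual LBR "counter" is marked in use; otherwise the claim on it
 * is dropped.
 */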
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
                                             struct msr_data *msr_info, bool read)
{
        u32 index = msr_info->index;

        if (read)
                rdmsrl(index, msr_info->data);
        else
                wrmsrl(index, msr_info->data);
        __set_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);

        clear_bit(INTEL_PMC_IDX_FIXED_VLBR, vcpu_to_pmu(vcpu)->pmc_in_use);
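
/*
 * From intel_pmu_get_msr(): reads of the emulated PMU MSRs come either
 * from cached pmu state or from the matching GP/fixed counter, masked
 * down to the counter width exposed to the guest.
 */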
        u32 msr = msr_info->index;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                msr_info->data = pmu->fixed_ctr_ctrl;
                break;
        case MSR_IA32_PEBS_ENABLE:
                msr_info->data = pmu->pebs_enable;
                break;
        case MSR_IA32_DS_AREA:
                msr_info->data = pmu->ds_area;
                break;
        case MSR_PEBS_DATA_CFG:
                msr_info->data = pmu->pebs_data_cfg;
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_GP];
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        u64 val = pmc_read_counter(pmc);
                        msr_info->data =
                                val & pmu->counter_bitmask[KVM_PMC_FIXED];
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        msr_info->data = pmc->eventsel;
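
/*
 * From intel_pmu_set_msr(): guest writes are validated against the
 * reserved-bit masks built by intel_pmu_refresh(), and counters are
 * only reprogrammed when the written value actually changes something.
 */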
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
        u64 reserved_bits, diff;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (data & pmu->fixed_ctr_ctrl_mask)
                        return 1;

                if (pmu->fixed_ctr_ctrl != data)
                        reprogram_fixed_counters(pmu, data);
                break;
        case MSR_IA32_PEBS_ENABLE:
                if (data & pmu->pebs_enable_mask)
                        return 1;

                if (pmu->pebs_enable != data) {
                        diff = pmu->pebs_enable ^ data;
                        pmu->pebs_enable = data;
                        reprogram_counters(pmu, diff);
                }
                break;
        case MSR_IA32_DS_AREA:
                if (is_noncanonical_address(data, vcpu))
                        return 1;

                pmu->ds_area = data;
                break;
        case MSR_PEBS_DATA_CFG:
                if (data & pmu->pebs_data_cfg_mask)
                        return 1;

                pmu->pebs_data_cfg = data;
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
                        if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
                            (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
                                return 1;

                        if (!msr_info->host_initiated &&
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc_write_counter(pmc, data);
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        reserved_bits = pmu->reserved_bits;
                        if ((pmc->idx == 2) &&
                            (pmu->raw_event_mask & HSW_IN_TX_CHECKPOINTED))
                                reserved_bits ^= HSW_IN_TX_CHECKPOINTED;
                        if (data & reserved_bits)
                                return 1;

                        if (data != pmc->eventsel) {
                                pmc->eventsel = data;
                                kvm_pmu_request_counter_reprogram(pmc);
                        }
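
/*
 * From setup_fixed_pmc_eventsel(): seed each fixed counter's eventsel
 * with the architectural event it hard-counts, so the common
 * reprogramming path can treat fixed and GP counters alike.
 */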
        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
                struct kvm_pmc *pmc = &pmu->fixed_counters[index];
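
/*
 * From intel_pmu_refresh(): rebuild the vPMU model from guest CPUID.
 * Everything is first reset to "no PMU" defaults; features are then
 * enabled only to the extent that CPUID leaf 0xA and the guest's
 * IA32_PERF_CAPABILITIES allow.
 */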
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;
        u64 perf_capabilities;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;
        pmu->raw_event_mask = X86_RAW_EVENT_MASK;
        pmu->global_ctrl_mask = ~0ull;
        pmu->global_status_mask = ~0ull;
        pmu->fixed_ctr_ctrl_mask = ~0ull;
        pmu->pebs_enable_mask = ~0ull;
        pmu->pebs_data_cfg_mask = ~0ull;
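
        /*
         * "entry" is the guest's CPUID.0xA leaf, looked up just before
         * this check (lookup elided in this excerpt); without it, or with
         * the per-VM PMU disabled, the defaults above are left in place.
         */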
        if (!entry || !vcpu->kvm->arch.enable_pmu)
                return;

        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         kvm_pmu_cap.num_counters_gp);
        eax.split.bit_width = min_t(int, eax.split.bit_width,
                                    kvm_pmu_cap.bit_width_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        eax.split.mask_length = min_t(int, eax.split.mask_length,
                                      kvm_pmu_cap.events_mask_len);
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
                                                    kvm_pmu_cap.num_counters_fixed);
                edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
                                                  kvm_pmu_cap.bit_width_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
                pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
        counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED));
        pmu->global_ctrl_mask = counter_mask;

        pmu->global_status_mask = pmu->global_ctrl_mask
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (vmx_pt_mode_is_host_guest())
                pmu->global_status_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

        entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
                pmu->reserved_bits ^= HSW_IN_TX;
                pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
        }

        bitmap_set(pmu->all_valid_pmc_idx,
                0, pmu->nr_arch_gp_counters);
        bitmap_set(pmu->all_valid_pmc_idx,
                INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

        bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
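
        /*
         * PEBS configuration.  perf_capabilities here is the guest's
         * IA32_PERF_CAPABILITIES value: with the baseline capability any
         * implemented counter may carry PEBS and adaptive records are
         * permitted; without it PEBS is limited to the GP counters.
         */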
        if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
                if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
                        pmu->pebs_enable_mask = counter_mask;
                        pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
                        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                                pmu->fixed_ctr_ctrl_mask &=
                                        ~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
                        }
                        pmu->pebs_data_cfg_mask = ~0xff00000full;
                } else {
                        pmu->pebs_enable_mask =
                                ~((1ull << pmu->nr_arch_gp_counters) - 1);
                }
        }
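
/*
 * From intel_pmu_init(): one-time setup of every possible GP and fixed
 * counter; how many of them are actually exposed is decided later by
 * intel_pmu_refresh() from guest CPUID.
 */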
        for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }

        for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
                pmu->fixed_counters[i].current_config = 0;
        }
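
/*
 * From intel_pmu_legacy_freezing_lbrs_on_pmi(): emulate the legacy
 * "freeze LBRs on PMI" behavior by clearing DEBUGCTLMSR_LBR in the
 * guest's VMCS when a PMI is delivered.
 */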
        if (data & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
                data &= ~DEBUGCTLMSR_LBR;
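
/*
 * From intel_pmu_deliver_pmi(): the freeze-LBRs-on-PMI quirk applies
 * only to architectural PMU versions 2 and 3.
 */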
        if (version > 1 && version < 4)
                intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
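
/*
 * From vmx_update_intercept_for_lbr_msrs(): walk the guest's LBR
 * from/to/info record MSRs and set or clear their intercepts in one
 * pass.
 */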
        for (i = 0; i < lbr->nr; i++) {
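
/*
 * From vmx_passthrough_lbr_msrs(): LBR MSRs stay passed through only
 * while the host perf event that owns the LBRs is active; otherwise
 * passthrough is torn down and a rate-limited warning is printed.
 */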
        if (vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR)
                goto warn;
        if (test_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use))
                goto warn;

        __clear_bit(INTEL_PMC_IDX_FIXED_VLBR, pmu->pmc_in_use);

warn:
        pr_warn_ratelimited("vcpu-%d: fail to passthrough LBR.\n", vcpu->vcpu_id);
        if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
                intel_pmu_release_guest_lbr_event(vcpu);
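
/*
 * From intel_pmu_cross_mapped_check(): record in host_cross_mapped_mask
 * the host counters backing guest counters that host perf scheduled at
 * a different index than the guest expects.
 */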
        struct kvm_pmc *pmc = NULL;

        for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
                         X86_PMC_IDX_MAX) {
                hw_idx = pmc->perf_event->hw.idx;
                if (hw_idx != pmc->idx && hw_idx > -1)
                        pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
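
/*
 * From the intel_pmu_ops descriptor: Intel-specific constants consumed
 * by the common KVM PMU code when enumerating GP counters.
 */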
        .EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
        .MIN_NR_GP_COUNTERS = 1,