#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"
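
/*
 * amd_pmc_idx_to_pmc(): translate a raw counter index into the vCPU's
 * general-purpose counter, with the array access hardened by
 * array_index_nospec() against speculative out-of-bounds indexing.
 */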
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}
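
/*
 * From get_gp_pmc_amd(): resolve a counter or event-select MSR to a GP
 * counter index. The legacy MSR_K7_* MSRs map 1:1 to counter indices,
 * while the PerfCtrCore MSR_F15H_PERF_CTLn/CTRn MSRs are interleaved in
 * CTL/CTR pairs, hence the divide by two. The guest CPUID and
 * counter-type checks of the original function are not shown here.
 */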
	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		idx = msr - MSR_K7_PERFCTR0;
		break;
	}
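
/*
 * From amd_is_valid_rdpmc_ecx(): RDPMC is valid only for indices below
 * the number of general-purpose counters exposed to the guest.
 */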
	return idx < pmu->nr_arch_gp_counters;
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}
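
/*
 * From amd_is_valid_msr(): the legacy K7 MSRs exist whenever the vPMU is
 * enabled, the PerfCtrCore MSRs depend on guest CPUID, and the global
 * control/status MSRs require PMU version 2 (AMD PerfMonV2).
 */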
	switch (msr) {
	case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
		return pmu->version > 0;
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		return pmu->version > 1;
	default:
		if (msr > MSR_F15H_PERF_CTR5 &&
		    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
			return pmu->version > 1;
		break;
	}
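
/*
 * From amd_pmu_get_msr(): a read of MSR_K7_EVNTSELn or MSR_F15H_PERF_CTLn
 * simply returns the cached event selector for the matching counter.
 */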
	u32 msr = msr_info->index;

	msr_info->data = pmc->eventsel;
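
/*
 * From amd_pmu_set_msr(): writes to an event-select MSR drop the reserved
 * bits and, if the value actually changed, trigger a reprogram of the
 * backing perf event.
 */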
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	data &= ~pmu->reserved_bits;
	if (data != pmc->eventsel) {
		pmc->eventsel = data;
		kvm_pmu_request_counter_reprogram(pmc);
	}
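
/*
 * From amd_pmu_refresh(): size and configure the vPMU from guest CPUID.
 * With PerfMonV2 the counter count comes from CPUID leaf 0x80000022,
 * otherwise it falls back to the PerfCtrCore or legacy counter counts.
 */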
	union cpuid_0x80000022_ebx ebx;

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
		pmu->version = 2;
		ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
		pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
	} else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	} else {
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
	}

	pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
					 kvm_pmu_cap.num_counters_gp);

	if (pmu->version > 1) {
		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
		pmu->global_status_mask = pmu->global_ctrl_mask;
	}

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	/* Fixed counters are not available on AMD; clear them out. */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
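
/*
 * From amd_pmu_init(): one-time initialization of the per-vCPU GP counter
 * array; the compile-time assertions keep KVM_AMD_PMC_MAX_GENERIC within
 * the bounds of the gp_counters[] storage.
 */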
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
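
/*
 * Tail of the amd_pmu_ops definition, the callback table KVM uses to
 * dispatch PMU operations on AMD hosts; the function pointer initializers
 * are not shown here.
 */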
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};