#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
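/*
 * Illustrative only: the three accessors above navigate the containment
 * chain vcpu -> arch.pmu and pmc -> vcpu, and round-trip cleanly:
 *
 *	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 *	struct kvm_vcpu *v = pmu_to_vcpu(pmu);		// v == vcpu
 *	struct kvm_pmu *owner = pmc_to_pmu(pmc);	// via pmc->vcpu
 */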
#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
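/*
 * Worked example (illustrative value): with IA32_FIXED_CTR_CTRL = 0xb0,
 * fixed_ctrl_field(0xb0, 1) == 0xb, i.e. fixed counter 1 counts in both
 * kernel (bit 0) and user (bit 1) mode and has its PMI bit (bit 3) set.
 * Masking with 0x3, as pmc_speculative_in_use() does below, isolates the
 * two enable bits.
 */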
#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
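/*
 * These indices sit well above any real counter: a guest RDPMC with, e.g.,
 * ECX = 0x10000 reads the host TSC rather than a hardware PMC when the
 * VMware backdoor is enabled; see is_vmware_backdoor_pmc() and
 * kvm_pmu_rdpmc() below.
 */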
struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
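/*
 * A vendor implementation fills in one of these tables and registers it via
 * kvm_pmu_ops_update() during hardware setup.  Minimal sketch with
 * hypothetical callbacks (the real tables are intel_pmu_ops and amd_pmu_ops,
 * declared at the bottom of this header):
 *
 *	static bool xyz_hw_event_available(struct kvm_pmc *pmc)
 *	{
 *		return true;	// pretend every event is supported
 *	}
 *
 *	static struct kvm_pmu_ops xyz_pmu_ops = {
 *		.hw_event_available = xyz_hw_event_available,
 *		.MAX_NR_GP_COUNTERS = 8,	// illustrative limits
 *		.MIN_NR_GP_COUNTERS = 1,
 *	};
 */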
static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * KVM exposes and emulates PERF_GLOBAL_CTRL only if the guest PMU
	 * supports at least "Architectural Performance Monitoring Version 2";
	 * AMD's version of PERF_GLOBAL_CTRL also shows up with v2.
	 */
	return pmu->version > 1;
}
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}
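/*
 * Example: with 48-bit general purpose counters, counter_bitmask[KVM_PMC_GP]
 * is (1ULL << 48) - 1, so values read back from a GP counter wrap at 2^48.
 */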
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	return counter & pmc_bitmask(pmc);
}
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}
static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}
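/*
 * Worked example: with two GP counters and one fixed counter,
 * global_ctrl_mask has every bit set except 0, 1 and 32, so a guest write
 * of (1ull << 1) is accepted while (1ull << 3) hits a reserved bit and is
 * rejected.
 */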
/*
 * Returns the general purpose PMC backing @msr, or NULL.  The same helper
 * serves both the PERFCTRn and EVNTSELn ranges; @base tells them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
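/*
 * Sketch of typical use: an MSR intercept probes each architectural range
 * with the matching base, e.g. on Intel:
 *
 *	struct kvm_pmc *pmc;
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
 */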
/* Returns the fixed PMC backing @msr, or NULL. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
extern struct x86_pmu_capability kvm_pmu_cap;
extern bool __read_mostly enable_pmu;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace; disable vPMU support on hybrid parts.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf reports a non-zero, but architecturally
		 * insufficient, number of GP counters.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
}
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
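/*
 * Sketch of how a PERF_GLOBAL_CTRL write uses this: XOR-ing the old and new
 * values yields exactly the counters whose enable bit flipped, and only
 * those are marked for reprogramming:
 *
 *	u64 diff = pmu->global_ctrl ^ data;
 *
 *	pmu->global_ctrl = data;
 *	reprogram_counters(pmu, diff);
 */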
/*
 * Check if a PMC is enabled by comparing it against the global enable bits.
 * If the vPMU doesn't have global_ctrl, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;

#endif /* __KVM_X86_PMU_H */