#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);
/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};
#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,		     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{
	memcpy(&kvm_pmu_ops, pmu_ops, sizeof(kvm_pmu_ops));

#define __KVM_X86_PMU_OP(func) \
	static_call_update(kvm_x86_pmu_##func, kvm_pmu_ops.func);
#define KVM_X86_PMU_OP(func) \
	WARN_ON(!kvm_pmu_ops.func); __KVM_X86_PMU_OP(func)
#define KVM_X86_PMU_OP_OPTIONAL __KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>
#undef __KVM_X86_PMU_OP
}
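/*
 * A sketch of what the two macro blocks above expand to for a
 * hypothetical op named "refresh" (illustrative only; the real op list
 * lives in asm/kvm-x86-pmu-ops.h):
 *
 *	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_refresh, ...);
 *	...
 *	WARN_ON(!kvm_pmu_ops.refresh);
 *	static_call_update(kvm_x86_pmu_refresh, kvm_pmu_ops.refresh);
 *
 * i.e. one NULL static call is defined per op, and kvm_pmu_ops_update()
 * patches each call site to the vendor (Intel or AMD) implementation.
 */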
static void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	bool skip_pmi = false;

	if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
		if (!in_pmi) {
			/* KVM doesn't generate PEBS records for emulated instructions. */
			skip_pmi = true;
		} else {
			/* Indicate PEBS overflow PMI to guest. */
			skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
						      (unsigned long *)&pmu->global_status);
		}
	} else {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
	}

	if (pmc->intr && !skip_pmi)
		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;

	/* Ignore overflows for counters that are queued for reprogramming. */
	if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi))
		return;

	__kvm_perf_overflow(pmc, true);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;

	return sample_period;
}
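/*
 * Worked example: assuming a 48-bit counter, pmc_bitmask() is
 * 0xffffffffffff.  If the guest programs the counter to
 * 0xfffffffffff0, then (-counter_value) & mask == 0x10, i.e. perf is
 * asked to sample after 16 more events, exactly when the guest's
 * counter would overflow.
 */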
static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if ((attr.config & HSW_IN_TX_CHECKPOINTED) &&
	    guest_cpuid_is_intel(pmc->vcpu)) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period; clear the sample period so that allocating the
		 * counter doesn't fail.
		 */
		attr.sample_period = 0;
	}
	if (pebs)
		attr.precise_ip = pmc_get_pebs_precise_level(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return PTR_ERR(event);
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	pmc->is_paused = false;
	pmc->intr = intr || pebs;
	return 0;
}
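/*
 * Note the "intr || pebs" above: a PEBS counter always requests an
 * interrupt, because the guest's PEBS buffer overflow is signaled via
 * GLOBAL_STATUS_BUFFER_OVF_BIT and delivered through the same PMI path
 * (see __kvm_perf_overflow()).
 */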
static bool pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;
	u64 prev_counter;

	/* Update the counter; pausing resets perf's count to zero. */
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_pause(pmc->perf_event, true);

	/* Snapshot the value before folding in emulated events. */
	prev_counter = counter & pmc_bitmask(pmc);

	counter += pmc->emulated_counter;
	pmc->counter = counter & pmc_bitmask(pmc);

	pmc->emulated_counter = 0;
	pmc->is_paused = true;

	return pmc->counter < prev_counter;
}
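/*
 * Example of the wrap check above, assuming an 8-bit counter for
 * readability: prev_counter == 0xfe plus two emulated events yields
 * pmc->counter == 0x00, and 0x00 < 0xfe reports the overflow that the
 * masked addition would otherwise silently discard.
 */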
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* Recalibrate the sample period; perf may reject the new period. */
	if (is_sampling_event(pmc->perf_event) &&
	    perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* The PEBS-ness of the event must still match pebs_enable. */
	if (test_bit(pmc->idx,
		     (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
	    (!!pmc->perf_event->attr.precise_ip))
		return false;

	/* Reuse the existing perf_event instead of recreating it. */
	perf_event_enable(pmc->perf_event);
	pmc->is_paused = false;

	return true;
}
static void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}
static void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}
void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
	/* A WRMSR is a write, not a read-modify-write: drop unconsumed emulated counts. */
	pmc->emulated_counter = 0;
	pmc->counter = val & pmc_bitmask(pmc);
	pmc_update_sample_period(pmc);
}
EXPORT_SYMBOL_GPL(pmc_write_counter);
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
	u64 a = *(u64 *)pa & mask;
	u64 b = *(u64 *)pb & mask;

	return (a > b) - (a < b);
}
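/*
 * The "(a > b) - (a < b)" idiom returns -1/0/1 without the truncation
 * bugs that "return a - b" would have for 64-bit keys; the same helper
 * backs both the sort and bsearch comparators below.
 */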
static int filter_sort_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT |
				   KVM_PMU_MASKED_ENTRY_EXCLUDE));
}

/*
 * Searching is done on the 'includes' and 'excludes' lists separately
 * rather than on the combined 'events' list, so the exclude bit can be
 * ignored when comparing for lookup.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{
	return filter_cmp(pa, pb, (KVM_PMU_MASKED_ENTRY_EVENT_SELECT));
}
static int find_filter_index(u64 *events, u64 nevents, u64 key)
{
	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
			  filter_event_cmp);

	return fe ? fe - events : -1;
}
static bool is_filter_entry_match(u64 filter_event, u64 umask)
{
	u64 mask = filter_event >> (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8);
	u64 match = filter_event & KVM_PMU_MASKED_ENTRY_UMASK_MATCH;

	BUILD_BUG_ON((KVM_PMU_ENCODE_MASKED_ENTRY(0, 0xff, 0, false) >>
		      (KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT - 8)) !=
		     ARCH_PERFMON_EVENTSEL_UMASK);

	return (umask & mask) == match;
}
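/*
 * Worked example: a masked entry with umask_mask == 0xf0 and
 * umask_match == 0x80 matches any unit mask in 0x80..0x8f, since those
 * are exactly the values for which (umask & 0xf0) == 0x80.
 */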
static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{
	u64 event_select = eventsel & kvm_pmu_ops.EVENTSEL_EVENT;
	u64 umask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
	int i, index;

	index = find_filter_index(events, nevents, event_select);
	if (index < 0)
		return false;

	/*
	 * Entries are sorted by the event select; walk the list in both
	 * directions to process every entry with the targeted event select.
	 */
	for (i = index; i < nevents; i++) {
		if (filter_event_cmp(&events[i], &event_select))
			break;
		if (is_filter_entry_match(events[i], umask))
			return true;
	}
	for (i = index - 1; i >= 0; i--) {
		if (filter_event_cmp(&events[i], &event_select))
			break;
		if (is_filter_entry_match(events[i], umask))
			return true;
	}
	return false;
}
static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
				u64 eventsel)
{
	if (filter_contains_match(f->includes, f->nr_includes, eventsel) &&
	    !filter_contains_match(f->excludes, f->nr_excludes, eventsel))
		return f->action == KVM_PMU_EVENT_ALLOW;

	return f->action == KVM_PMU_EVENT_DENY;
}
static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
				   int idx)
{
	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;

	if (filter->action == KVM_PMU_EVENT_DENY &&
	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;
	if (filter->action == KVM_PMU_EVENT_ALLOW &&
	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
		return false;

	return true;
}
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (!filter)
		return true;

	if (pmc_is_gp(pmc))
		return is_gp_event_allowed(filter, pmc->eventsel);

	return is_fixed_event_allowed(filter, pmc->idx);
}
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
	       static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
	       check_pmu_event_filter(pmc);
}
static void reprogram_counter(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	u64 eventsel = pmc->eventsel;
	u64 new_config = eventsel;
	bool emulate_overflow;
	u8 fixed_ctr_ctrl;

	emulate_overflow = pmc_pause_counter(pmc);

	if (!pmc_event_is_allowed(pmc))
		goto reprogram_complete;

	if (emulate_overflow)
		__kvm_perf_overflow(pmc, false);

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	if (pmc_is_fixed(pmc)) {
		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
						  pmc->idx - INTEL_PMC_IDX_FIXED);
		if (fixed_ctr_ctrl & 0x1)	/* count OS (ring 0) */
			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
		if (fixed_ctr_ctrl & 0x2)	/* count user (ring 3) */
			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
		if (fixed_ctr_ctrl & 0x8)	/* enable PMI on overflow */
			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
		new_config = (u64)fixed_ctr_ctrl;
	}

	if (pmc->current_config == new_config && pmc_resume_counter(pmc))
		goto reprogram_complete;

	pmc_release_perf_event(pmc);

	pmc->current_config = new_config;

	/*
	 * If reprogramming fails, e.g. due to contention, leave the counter's
	 * reprogram bit set, i.e. opportunistically try again on the next PMU
	 * refresh.
	 */
	if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
				  (eventsel & pmu->raw_event_mask),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
				  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
				  eventsel & ARCH_PERFMON_EVENTSEL_INT))
		return;

reprogram_complete:
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);

		if (unlikely(!pmc)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmc);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
}
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCE) &&
	    (static_call(kvm_x86_get_cpl)(vcpu) != 0) &&
	    kvm_is_cr0_bit_set(vcpu, X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}
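/*
 * RDPMC "fast mode" (ECX bit 31 set) returns only the low 32 bits of
 * the counter, hence mask == ~0u; otherwise all implemented bits are
 * returned (~0ull, further narrowed by the rdpmc_ecx_to_pmc() op).
 */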
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu)) {
		static_call_cond(kvm_x86_pmu_deliver_pmi)(vcpu);
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
	}
}
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		return kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu));
	default:
		break;
	}

	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
}
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = 0;
		break;
	default:
		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
	}

	return 0;
}
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u32 msr = msr_info->index;
	u64 data = msr_info->data;
	u64 diff;

	switch (msr) {
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (!msr_info->host_initiated)
			return 1; /* RO MSR */
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
		/* Per PPR, read-only MSR; guest writes are ignored. */
		if (!msr_info->host_initiated)
			break;

		if (data & pmu->global_status_mask)
			return 1;

		pmu->global_status = data;
		break;
	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
		data &= ~pmu->global_ctrl_mask;
		fallthrough;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (!kvm_valid_perf_global_ctrl(pmu, data))
			return 1;

		if (pmu->global_ctrl != data) {
			diff = pmu->global_ctrl ^ data;
			pmu->global_ctrl = data;
			reprogram_counters(pmu, diff);
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		/*
		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL_STATUS_RESET, clears bits in
		 * GLOBAL_STATUS, so the set of reserved bits is the same.
		 */
		if (data & pmu->global_status_mask)
			return 1;
		fallthrough;
	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
		if (!msr_info->host_initiated)
			pmu->global_status &= ~data;
		break;
	default:
		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
	}

	return 0;
}
static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	pmu->need_cleanup = false;

	bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
		if (!pmc)
			continue;

		pmc_stop_counter(pmc);
		pmc->counter = 0;
		pmc->emulated_counter = 0;

		if (pmc_is_gp(pmc))
			pmc->eventsel = 0;
	}

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;

	static_call_cond(kvm_x86_pmu_reset)(vcpu);
}
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
		return;

	/*
	 * Stop/release all existing counters/events before realizing the new
	 * vPMU model.
	 */
	kvm_pmu_reset(vcpu);

	bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
	static_call(kvm_x86_pmu_refresh)(vcpu);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	static_call(kvm_x86_pmu_init)(vcpu);
	kvm_pmu_refresh(vcpu);
}
/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	static_call_cond(kvm_x86_pmu_cleanup)(vcpu);

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{
	pmc->emulated_counter++;
	kvm_pmu_request_counter_reprogram(pmc);
}
static bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
				      unsigned int perf_hw_id)
{
	return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
		 AMD64_RAW_EVENT_MASK_NB);
}
static bool cpl_is_matched(struct kvm_pmc *pmc)
{
	bool select_os, select_user;
	u64 config;

	if (pmc_is_gp(pmc)) {
		config = pmc->eventsel;
		select_os = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
					  pmc->idx - INTEL_PMC_IDX_FIXED);
		select_os = config & 0x1;	/* bit 0: count OS */
		select_user = config & 0x2;	/* bit 1: count user */
	}

	return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int i;

	for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
		pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

		if (!pmc || !pmc_event_is_allowed(pmc))
			continue;

		/* Ignore checks for edge detect, pin control, invert and CMASK bits. */
		if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
			kvm_pmu_incr_counter(pmc);
	}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
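/*
 * Usage note: kvm_pmu_trigger_event() is how KVM counts emulated
 * instructions, e.g. x86.c calls it with PERF_COUNT_HW_INSTRUCTIONS
 * after skipping an emulated instruction, so that any matching,
 * enabled counter receives an emulated increment.
 */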
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
	u64 mask = kvm_pmu_ops.EVENTSEL_EVENT |
		   KVM_PMU_MASKED_ENTRY_UMASK_MASK |
		   KVM_PMU_MASKED_ENTRY_UMASK_MATCH |
		   KVM_PMU_MASKED_ENTRY_EXCLUDE;
	int i;

	for (i = 0; i < filter->nevents; i++) {
		if (filter->events[i] & ~mask)
			return false;
	}

	return true;
}
static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{
	int i, j;

	for (i = 0, j = 0; i < filter->nevents; i++) {
		/* Skip events that are impossible to match. */
		if (filter->events[i] & ~(kvm_pmu_ops.EVENTSEL_EVENT |
					  ARCH_PERFMON_EVENTSEL_UMASK))
			continue;

		/*
		 * Convert userspace events to the common in-kernel masked
		 * form by adding an "all ones" umask_mask.
		 */
		filter->events[j++] = filter->events[i] |
				      (0xFFULL << KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT);
	}

	filter->nevents = j;
}
static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{
	int i;

	if (!(filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS))
		convert_to_masked_filter(filter);
	else if (!is_masked_filter_valid(filter))
		return -EINVAL;

	/*
	 * Sort entries by event select and includes vs. excludes so that
	 * all entries with the same event select can be processed
	 * efficiently during filtering.
	 */
	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
	     filter_sort_cmp, NULL);

	i = 0;
	/* Find the first EXCLUDE event (only supported for masked events). */
	if (filter->flags & KVM_PMU_EVENT_FLAG_MASKED_EVENTS) {
		for (i = 0; i < filter->nevents; i++) {
			if (filter->events[i] & KVM_PMU_MASKED_ENTRY_EXCLUDE)
				break;
		}
	}

	filter->nr_includes = i;
	filter->nr_excludes = filter->nevents - filter->nr_includes;
	filter->includes = filter->events;
	filter->excludes = filter->events + filter->nr_includes;

	return 0;
}
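/*
 * After filter_sort_cmp(), the events array is laid out as
 *	[ include, include, ..., exclude, exclude, ... ]
 * because EXCLUDE is a more significant sort key than the event select,
 * so the first EXCLUDE entry found above cleanly splits the array into
 * the two sub-lists that filter_contains_match() searches.
 */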
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter __user *user_filter = argp;
	struct kvm_x86_pmu_event_filter *filter;
	struct kvm_pmu_event_filter tmp;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	size_t size;
	int r;

	if (copy_from_user(&tmp, user_filter, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags & ~KVM_PMU_EVENT_FLAGS_VALID_MASK)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	filter->action = tmp.action;
	filter->nevents = tmp.nevents;
	filter->fixed_counter_bitmap = tmp.fixed_counter_bitmap;
	filter->flags = tmp.flags;

	r = -EFAULT;
	if (copy_from_user(filter->events, user_filter->events,
			   sizeof(filter->events[0]) * filter->nevents))
		goto cleanup;

	r = prepare_filter_lists(filter);
	if (r)
		goto cleanup;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);
	synchronize_srcu_expedited(&kvm->srcu);

	BUILD_BUG_ON(sizeof(((struct kvm_pmu *)0)->reprogram_pmi) >
		     sizeof(((struct kvm_pmu *)0)->__reprogram_pmi));

	/* Force all counters to be reprogrammed against the new filter. */
	kvm_for_each_vcpu(i, vcpu, kvm)
		atomic64_set(&vcpu_to_pmu(vcpu)->__reprogram_pmi, -1ull);

	kvm_make_all_cpus_request(kvm, KVM_REQ_PMU);

	r = 0;
cleanup:
	kfree(filter);
	return r;
}
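/*
 * A minimal userspace sketch (not part of this file) of installing a
 * filter via the KVM_SET_PMU_EVENT_FILTER VM ioctl; "vm_fd" is assumed
 * to be an open VM file descriptor and error handling is elided.  This
 * example denies the guest event select 0x3c (unhalted core cycles on
 * Intel):
 */
#if 0
	struct kvm_pmu_event_filter *f;

	f = calloc(1, sizeof(*f) + sizeof(__u64));	/* room for one event */
	f->action = KVM_PMU_EVENT_DENY;
	f->nevents = 1;
	f->events[0] = 0x3c;				/* event select, umask 0 */
	ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
#endif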