#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
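
/*
 * Helper macros shared by the tracepoints that log a struct kvm_mmu_page:
 * they declare, assign and pretty-print the fields common to every shadow
 * page (generation, gfn, role, root count and unsync state).
 */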
#define KVM_MMU_PAGE_FIELDS		\
	__field(__u8, mmu_valid_gen)	\
	__field(__u64, gfn)		\
	__field(__u32, role)		\
	__field(__u32, root_count)	\
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)				\
	__entry->mmu_valid_gen = sp->mmu_valid_gen;	\
	__entry->gfn = sp->gfn;				\
	__entry->role = sp->role.word;			\
	__entry->root_count = sp->root_count;		\
	__entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({					\
	const char *saved_ptr = trace_seq_buffer_ptr(p);		\
	static const char *access_str[] = {				\
		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"	\
	};								\
	union kvm_mmu_page_role role;					\
									\
	role.word = __entry->role;					\
									\
	trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
			 " %snxe %sad root %u %s%c",			\
			 __entry->mmu_valid_gen,			\
			 __entry->gfn, role.level,			\
			 role.has_4_byte_gpte ? 4 : 8,			\
			 role.quadrant,					\
			 role.direct ? " direct" : "",			\
			 access_str[role.access],			\
			 role.invalid ? " invalid" : "",		\
			 role.efer_nx ? "" : "!",			\
			 role.ad_disabled ? "!" : "",			\
			 __entry->root_count,				\
			 __entry->unsync ? "unsync" : "sync", 0);	\
	saved_ptr;							\
		})

#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }
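
/*
 * Make the RET_PF_* page fault return codes (used by the fast_page_fault
 * tracepoint below) resolvable in the trace output.
 */
TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);

/* A guest page table walk has started. */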
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->pferr = pferr;
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
		),

	TP_fast_assign(
		__entry->pte = pte;
		__entry->level = level;
		),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		__entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
				+ index * size;
		),

	TP_printk("gpa %llx", __entry->gpa)
);
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);
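
/* A dirty bit was set in a guest paging-structure entry. */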
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
);
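
/* The guest page table walk terminated with a fault. */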
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
		),

	TP_fast_assign(
		__entry->pferr = pferr;
		),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
		),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		__entry->created = created;
		),

	TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
		  __entry->created ? "new" : "existing")
);
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
		),

	TP_fast_assign(
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->access = spte & ACC_ALL;
		__entry->gen = get_mmio_spte_generation(spte);
		),

	TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
		  __entry->gfn, __entry->access, __entry->gen)
);
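
/* A page fault was handled as MMIO by way of a cached MMIO SPTE. */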
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		),

	TP_fast_assign(
		__entry->addr = addr;
		__entry->gfn = gfn;
		__entry->access = access;
		),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
);
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		 u64 *sptep, u64 old_spte, int ret),
	TP_ARGS(vcpu, fault, sptep, old_spte, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gpa_t, cr2_or_gpa)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(int, ret)
		),

	TP_fast_assign(
		__entry->vcpu_id = vcpu->vcpu_id;
		__entry->cr2_or_gpa = fault->addr;
		__entry->error_code = fault->error_code;
		__entry->sptep = sptep;
		__entry->old_spte = old_spte;
		__entry->new_spte = *sptep;
		__entry->ret = ret;
		),

	TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
	)
);
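
/* All shadow pages were invalidated by bumping kvm->arch.mmu_valid_gen. */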
TRACE_EVENT(
	kvm_mmu_zap_all_fast,
	TP_PROTO(struct kvm *kvm),
	TP_ARGS(kvm),

	TP_STRUCT__entry(
		__field(__u8, mmu_valid_gen)
		__field(unsigned int, mmu_used_pages)
		),

	TP_fast_assign(
		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
		),

	TP_printk("kvm-mmu-valid-gen %u used_pages %x",
		  __entry->mmu_valid_gen, __entry->mmu_used_pages
	)
);
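
/* A cached MMIO SPTE's generation was checked against the VM's current one. */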
TRACE_EVENT(
	check_mmio_spte,
	TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
	TP_ARGS(spte, kvm_gen, spte_gen),

	TP_STRUCT__entry(
		__field(unsigned int, kvm_gen)
		__field(unsigned int, spte_gen)
		__field(u64, spte)
		),

	TP_fast_assign(
		__entry->kvm_gen = kvm_gen;
		__entry->spte_gen = spte_gen;
		__entry->spte = spte;
		),

	TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
		  __entry->kvm_gen, __entry->spte_gen,
		  __entry->kvm_gen == __entry->spte_gen
	)
);
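
/* A leaf SPTE was written; the permission bits are decoded for the output. */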
TRACE_EVENT(
	kvm_mmu_set_spte,
	TP_PROTO(int level, gfn_t gfn, u64 *sptep),
	TP_ARGS(level, gfn, sptep),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(u64, sptep)
		__field(u8, level)
		__field(bool, r)
		__field(bool, x)
		__field(signed char, u)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = *sptep;
		__entry->sptep = virt_to_phys(sptep);
		__entry->level = level;
		__entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
		__entry->x = is_executable_pte(__entry->spte);
		__entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
	),

	TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
		  __entry->gfn, __entry->spte,
		  __entry->r ? "r" : "-",
		  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
		  __entry->x ? "x" : "-",
		  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
		  __entry->level, __entry->sptep
	)
);
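
/* The gfn, pfn and mapping level requested while servicing a page fault. */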
TRACE_EVENT(
	kvm_mmu_spte_requested,
	TP_PROTO(struct kvm_page_fault *fault),
	TP_ARGS(fault),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, pfn)
		__field(u8, level)
	),

	TP_fast_assign(
		__entry->gfn = fault->gfn;
		__entry->pfn = fault->pfn | (fault->gfn &
					     (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
		__entry->level = fault->goal_level;
	),

	TP_printk("gfn %llx pfn %llx level %d",
		  __entry->gfn, __entry->pfn, __entry->level
	)
);
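
/* A TDP MMU SPTE changed value. */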
TRACE_EVENT(
	kvm_tdp_mmu_spte_changed,
	TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
	TP_ARGS(as_id, gfn, level, old_spte, new_spte),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(u8, level)
		__field(u8, as_id)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->old_spte = old_spte;
		__entry->new_spte = new_spte;
		__entry->level = level;
		__entry->as_id = as_id;
	),

	TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
		  __entry->as_id, __entry->gfn, __entry->level,
		  __entry->old_spte, __entry->new_spte
	)
);
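
/* Result of splitting a huge page into smaller mappings; errno is 0 on success. */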
TRACE_EVENT(
	kvm_mmu_split_huge_page,
	TP_PROTO(u64 gfn, u64 spte, int level, int errno),
	TP_ARGS(gfn, spte, level, errno),

	TP_STRUCT__entry(
		__field(u64, gfn)
		__field(u64, spte)
		__field(int, level)
		__field(int, errno)
	),

	TP_fast_assign(
		__entry->gfn = gfn;
		__entry->spte = spte;
		__entry->level = level;
		__entry->errno = errno;
	),

	TP_printk("gfn %llx spte %llx level %d errno %d",
		  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
);

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <trace/define_trace.h>