19 #include <linux/kvm.h>
20 #include <linux/kvm_host.h>
22 #include <asm/stacktrace/nvhe.h>
26 struct kvm_nvhe_stacktrace_info *stacktrace_info
27 = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
28 unsigned long low = (
unsigned long)stacktrace_info->overflow_stack_base;
29 unsigned long high = low + OVERFLOW_STACK_SIZE;
31 return (
struct stack_info) {
39 unsigned long low = (
unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
40 unsigned long high = low + OVERFLOW_STACK_SIZE;
42 return (
struct stack_info) {
50 struct kvm_nvhe_stacktrace_info *stacktrace_info
51 = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
52 unsigned long low = (
unsigned long)stacktrace_info->stack_base;
53 unsigned long high = low + PAGE_SIZE;
55 return (
struct stack_info) {
63 unsigned long low = (
unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
64 unsigned long high = low + PAGE_SIZE;
66 return (
struct stack_info) {
87 struct stack_info stack_hyp, stack_kern;
91 if (stackinfo_on_stack(&stack_hyp, *addr,
size))
96 if (stackinfo_on_stack(&stack_hyp, *addr,
size))
102 *addr = *addr - stack_hyp.low + stack_kern.low;
123 return unwind_next_frame_record(state);
126 static void unwind(
struct unwind_state *state,
127 stack_trace_consume_fn consume_entry,
void *cookie)
132 if (!consume_entry(cookie, state->pc))
148 unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
149 unsigned long hyp_offset = (
unsigned long)arg;
152 where = (where &
va_mask) + hyp_offset;
153 kvm_err(
" [<%016lx>] %pB\n", where, (
void *)(where + kaslr_offset()));
/* Print the header line preceding an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}
/* Print the footer line terminating an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}
180 struct kvm_nvhe_stacktrace_info *stacktrace_info;
181 struct stack_info stacks[] = {
185 struct unwind_state state = {
187 .nr_stacks = ARRAY_SIZE(stacks),
190 stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
192 kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * In protected mode the host cannot access hypervisor memory directly, so
 * the backtrace entries are read from the stacktrace buffer shared by the
 * hypervisor instead of being unwound here.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
241 if (is_protected_kvm_enabled())
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params)
static struct stack_info stackinfo_get_overflow(void)
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
static struct stack_info stackinfo_get_hyp_kern_va(void)
static void hyp_dump_backtrace(unsigned long hyp_offset)
static void unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, void *cookie)
static struct stack_info stackinfo_get_hyp(void)
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
static void kvm_nvhe_dump_backtrace_start(void)
static int unwind_next(struct unwind_state *state)
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
static void pkvm_dump_backtrace(unsigned long hyp_offset)
static void kvm_nvhe_dump_backtrace_end(void)
static struct stack_info stackinfo_get_overflow_kern_va(void)