KVM
stacktrace.c
Go to the documentation of this file.
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * KVM nVHE hypervisor stack tracing support.
4  *
5  * Copyright (C) 2022 Google LLC
6  */
7 #include <asm/kvm_asm.h>
8 #include <asm/kvm_hyp.h>
9 #include <asm/memory.h>
10 #include <asm/percpu.h>
11 
/*
 * Per-CPU emergency stack used when the regular hyp stack overflows;
 * its base is published to the host via kvm_stacktrace_info below.
 */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

/*
 * Per-CPU unwind-start state (stack bases, fp, pc) filled in by
 * hyp_prepare_backtrace() so the host can unwind this stack from EL1.
 */
DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
16 
17 /*
18  * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
19  *
20  * @fp : frame pointer at which to start the unwinding.
21  * @pc : program counter at which to start the unwinding.
22  *
23  * Save the information needed by the host to unwind the non-protected
24  * nVHE hypervisor stack in EL1.
25  */
26 static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
27 {
28  struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
29  struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
30 
31  stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
32  stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
33  stacktrace_info->fp = fp;
34  stacktrace_info->pc = pc;
35 }
36 
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
#include <asm/stacktrace/nvhe.h>

/*
 * Per-CPU buffer of return addresses saved by pkvm_save_backtrace_entry(),
 * terminated by a 0 delimiter.  Shared with the host, which reads it from
 * EL1 to dump the protected-mode backtrace.
 */
DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
41 
42 static struct stack_info stackinfo_get_overflow(void)
43 {
44  unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
45  unsigned long high = low + OVERFLOW_STACK_SIZE;
46 
47  return (struct stack_info) {
48  .low = low,
49  .high = high,
50  };
51 }
52 
53 static struct stack_info stackinfo_get_hyp(void)
54 {
55  struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
56  unsigned long high = params->stack_hyp_va;
57  unsigned long low = high - PAGE_SIZE;
58 
59  return (struct stack_info) {
60  .low = low,
61  .high = high,
62  };
63 }
64 
/* Step the unwinder to the caller's frame record; negative on failure. */
static int unwind_next(struct unwind_state *state)
{
	return unwind_next_frame_record(state);
}
69 
70 static void notrace unwind(struct unwind_state *state,
71  stack_trace_consume_fn consume_entry,
72  void *cookie)
73 {
74  while (1) {
75  int ret;
76 
77  if (!consume_entry(cookie, state->pc))
78  break;
79  ret = unwind_next(state);
80  if (ret < 0)
81  break;
82  }
83 }
84 
85 /*
86  * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
87  *
88  * @arg : index of the entry in the stacktrace buffer
89  * @where : the program counter corresponding to the stack frame
90  *
91  * Save the return address of a stack frame to the shared stacktrace buffer.
92  * The host can access this shared buffer from EL1 to dump the backtrace.
93  */
94 static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
95 {
96  unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
97  int *idx = (int *)arg;
98 
99  /*
100  * Need 2 free slots: 1 for current entry and 1 for the
101  * delimiter.
102  */
103  if (*idx > ARRAY_SIZE(pkvm_stacktrace) - 2)
104  return false;
105 
106  stacktrace[*idx] = where;
107  stacktrace[++*idx] = 0UL;
108 
109  return true;
110 }
111 
112 /*
113  * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
114  *
115  * @fp : frame pointer at which to start the unwinding.
116  * @pc : program counter at which to start the unwinding.
117  *
118  * Save the unwinded stack addresses to the shared stacktrace buffer.
119  * The host can access this shared buffer from EL1 to dump the backtrace.
120  */
121 static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
122 {
123  struct stack_info stacks[] = {
126  };
127  struct unwind_state state = {
128  .stacks = stacks,
129  .nr_stacks = ARRAY_SIZE(stacks),
130  };
131  int idx = 0;
132 
133  kvm_nvhe_unwind_init(&state, fp, pc);
134 
135  unwind(&state, pkvm_save_backtrace_entry, &idx);
136 }
137 #else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
/* No-op: protected-mode stacktraces are compiled out. */
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
{
}
141 #endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
142 
/*
 * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
 *
 * @fp : frame pointer at which to start the unwinding.
 * @pc : program counter at which to start the unwinding.
 *
 * Saves the information needed by the host to dump the nVHE hypervisor
 * backtrace: the unwound trace itself in protected mode, or just the
 * unwind-start state otherwise.
 */
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
{
	if (!is_protected_kvm_enabled())
		hyp_prepare_backtrace(fp, pc);
	else
		pkvm_save_backtrace(fp, pc);
}
DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info)
static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
Definition: stacktrace.c:138
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
Definition: stacktrace.c:152
static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
Definition: stacktrace.c:26
__aligned(16)
static struct stack_info stackinfo_get_overflow(void)
Definition: stacktrace.c:42
static void unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, void *cookie)
Definition: stacktrace.c:70
static struct stack_info stackinfo_get_hyp(void)
Definition: stacktrace.c:53
static int unwind_next(struct unwind_state *state)
Definition: stacktrace.c:65