KVM
handle_exit.c File Reference
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/debug-monitors.h>
#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>
#include <kvm/arm_hypercalls.h>
#include "trace_handle_exit.h"

Macros

#define CREATE_TRACE_POINTS
 

Typedefs

typedef int(* exit_handle_fn) (struct kvm_vcpu *)
 

Functions

static void kvm_handle_guest_serror (struct kvm_vcpu *vcpu, u64 esr)
 
static int handle_hvc (struct kvm_vcpu *vcpu)
 
static int handle_smc (struct kvm_vcpu *vcpu)
 
static int handle_no_fpsimd (struct kvm_vcpu *vcpu)
 
static int kvm_handle_wfx (struct kvm_vcpu *vcpu)
 
static int kvm_handle_guest_debug (struct kvm_vcpu *vcpu)
 
static int kvm_handle_unknown_ec (struct kvm_vcpu *vcpu)
 
static int handle_sve (struct kvm_vcpu *vcpu)
 
static int kvm_handle_ptrauth (struct kvm_vcpu *vcpu)
 
static int kvm_handle_eret (struct kvm_vcpu *vcpu)
 
static int handle_svc (struct kvm_vcpu *vcpu)
 
static exit_handle_fn kvm_get_exit_handler (struct kvm_vcpu *vcpu)
 
static int handle_trap_exceptions (struct kvm_vcpu *vcpu)
 
int handle_exit (struct kvm_vcpu *vcpu, int exception_index)
 
void handle_exit_early (struct kvm_vcpu *vcpu, int exception_index)
 
void __noreturn __cold nvhe_hyp_panic_handler (u64 esr, u64 spsr, u64 elr_virt, u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar)
 

Variables

static exit_handle_fn arm_exit_handlers []
 

Macro Definition Documentation

◆ CREATE_TRACE_POINTS

#define CREATE_TRACE_POINTS

Definition at line 26 of file handle_exit.c.

Typedef Documentation

◆ exit_handle_fn

typedef int(* exit_handle_fn) (struct kvm_vcpu *)

Definition at line 29 of file handle_exit.c.
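Every exit handler in this file shares this signature so that handlers can be dispatched from a single table indexed by the ESR exception class (see arm_exit_handlers below). A minimal, self-contained sketch of the same pattern; the names here are illustrative, not kernel API:

typedef int (*handler_fn)(int arg);

static int default_handler(int arg) { return -1; }
static int class_two_handler(int arg) { return arg * 2; }

/* GNU C designated range initializer: every slot defaults to
 * default_handler, then specific classes are overridden. */
static handler_fn handlers[16] = {
	[0 ... 15] = default_handler,
	[2]        = class_two_handler,
};

static int dispatch(unsigned int class, int arg)
{
	return handlers[class & 15](arg);
}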

Function Documentation

◆ handle_exit()

int handle_exit(struct kvm_vcpu *vcpu, int exception_index)

Definition at line 322 of file handle_exit.c.

{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}
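The return value encodes the caller's contract: a positive value means the guest can be re-entered, 0 means control must return to userspace with run->exit_reason already set, and a negative value is an error. A simplified sketch of how a run loop consumes it; enter_guest() is a hypothetical stand-in for the real world switch in kvm_arch_vcpu_ioctl_run():

for (;;) {
	int exit_code = enter_guest(vcpu);	/* hypothetical world switch */
	int ret = handle_exit(vcpu, exit_code);

	if (ret <= 0)
		return ret;	/* 0: exit to userspace, < 0: error */
}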

◆ handle_exit_early()

void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)

Definition at line 366 of file handle_exit.c.

{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
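Both handle_exit() and handle_exit_early() rely on exception_index packing a pending-SError flag next to the exception code, so that a single value can carry both out of the world switch. A sketch of that encoding, following the definitions in asm/kvm_asm.h:

#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)		(!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT)))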

◆ handle_hvc()

static int handle_hvc(struct kvm_vcpu *vcpu)

Definition at line 37 of file handle_exit.c.

{
	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	/* Forward hvc instructions to the virtual EL2 if the guest has EL2. */
	if (vcpu_has_nv(vcpu)) {
		if (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_HCD)
			kvm_inject_undefined(vcpu);
		else
			kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

		return 1;
	}

	return kvm_smccc_call_handler(vcpu);
}

◆ handle_no_fpsimd()

static int handle_no_fpsimd(struct kvm_vcpu *vcpu)

Definition at line 93 of file handle_exit.c.

{
	kvm_inject_undefined(vcpu);
	return 1;
}

◆ handle_smc()

static int handle_smc(struct kvm_vcpu *vcpu)

Definition at line 56 of file handle_exit.c.

{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address. Furthermore, pre-incrementing
	 * the PC before potentially exiting to userspace maintains the same
	 * abstraction for both SMCs and HVCs.
	 */
	kvm_incr_pc(vcpu);

	/*
	 * SMCs with a nonzero immediate are reserved according to DEN0028E 2.9
	 * "SMC and HVC immediate value".
	 */
	if (kvm_vcpu_hvc_get_imm(vcpu)) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	/*
	 * If imm is zero then it is likely an SMCCC call.
	 *
	 * Note that on ARMv8.3, even if EL3 is not implemented, SMC executed
	 * at Non-secure EL1 is trapped to EL2 if HCR_EL2.TSC==1, rather than
	 * being treated as UNDEFINED.
	 */
	return kvm_smccc_call_handler(vcpu);
}
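For context, a guest-side SMCCC call that would land in this handler looks roughly like the sketch below: the function ID goes in x0 and the result comes back in x0. This is an illustrative fragment, not kernel code; a guest issuing SMC with a nonzero immediate instead gets ~0UL back, as above.

static unsigned long smccc_call(unsigned long function_id)
{
	register unsigned long x0 asm("x0") = function_id;

	/* SMCCC: function ID in x0, immediate must be zero. */
	asm volatile("smc #0" : "+r"(x0) : : "x1", "x2", "x3", "memory");
	return x0;
}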

◆ handle_svc()

static int handle_svc(struct kvm_vcpu *vcpu)

Definition at line 244 of file handle_exit.c.

{
	/*
	 * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
	 * 32-bit guest would be caught by vcpu_mode_is_bad_32bit(), so
	 * we should only have to deal with a 64-bit exception.
	 */
	kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
	return 1;
}

◆ handle_sve()

static int handle_sve(struct kvm_vcpu *vcpu)

Definition at line 203 of file handle_exit.c.

{
	kvm_inject_undefined(vcpu);
	return 1;
}

◆ handle_trap_exceptions()

static int handle_trap_exceptions(struct kvm_vcpu *vcpu)

Definition at line 297 of file handle_exit.c.

{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

◆ kvm_get_exit_handler()

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)

Definition at line 283 of file handle_exit.c.

{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

◆ kvm_handle_eret()

static int kvm_handle_eret(struct kvm_vcpu *vcpu)

Definition at line 220 of file handle_exit.c.

{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
		return kvm_handle_ptrauth(vcpu);

	/*
	 * If we got here, two possibilities:
	 *
	 * - the guest is in EL2, and we need to fully emulate ERET
	 *
	 * - the guest is in EL1, and we need to reinject the
	 *   exception into the L1 hypervisor.
	 *
	 * If KVM ever traps ERET for its own use, we'll have to
	 * revisit this.
	 */
	if (is_hyp_ctxt(vcpu))
		kvm_emulate_nested_eret(vcpu);
	else
		kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));

	return 1;
}

◆ kvm_handle_guest_debug()

static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)

kvm_handle_guest_debug - handle a debug exception instruction

@vcpu: the vcpu pointer

We route all debug exceptions through the same handler. If both the guest and host are using the same debug facilities it will be up to userspace to re-inject the correct exception for guest delivery.

Returns: 0 (while setting vcpu->run->exit_reason)

Definition at line 166 of file handle_exit.c.

{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		break;
	case ESR_ELx_EC_SOFTSTP_LOW:
		vcpu_clear_flag(vcpu, DBG_SS_ACTIVE_PENDING);
		break;
	}

	return 0;
}
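On the other side of this exit, a VMM would reassemble the ESR from the two halves before deciding what to do. A hypothetical userspace sketch (not kernel code), assuming a mmap'ed kvm_run structure:

static void handle_debug_exit(struct kvm_run *run)
{
	__u64 esr = run->debug.arch.hsr;

	if (run->flags & KVM_DEBUG_ARCH_HSR_HIGH_VALID)
		esr |= (__u64)run->debug.arch.hsr_high << 32;

	/* Decode the exception class from esr and either consume the
	 * event or re-inject it into the guest. */
}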

◆ kvm_handle_guest_serror()

static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)

Definition at line 31 of file handle_exit.c.

{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

◆ kvm_handle_ptrauth()

static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)

Definition at line 214 of file handle_exit.c.

{
	kvm_inject_undefined(vcpu);
	return 1;
}

◆ kvm_handle_unknown_ec()

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)

Definition at line 188 of file handle_exit.c.

{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

◆ kvm_handle_wfx()

static int kvm_handle_wfx(struct kvm_vcpu *vcpu)

kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event instruction executed by a guest

@vcpu: the vcpu pointer

WFE[T]: Yield the CPU and come back to this vcpu when the scheduler decides to.
WFI: Simply call kvm_vcpu_halt(), which will halt execution of world-switches and schedule other host processes until there is an incoming IRQ or FIQ to the VM.
WFIT: Same as WFI, with a timed wakeup implemented as a background timer.

WF{I,E}T can immediately return if the deadline has already expired.

Definition at line 114 of file handle_exit.c.

{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu_set_flag(vcpu, IN_WFIT);

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}
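For reference, the guest-side instruction that triggers the WFxT path takes an absolute CNTVCT_EL0 deadline in a register; the "now >= val" early-out above mirrors the architectural behaviour of returning immediately once the deadline has passed. A sketch, assuming an assembler and target that understand FEAT_WFxT (ARMv8.7):

static inline void wfit(unsigned long deadline)
{
	/* Waits for an interrupt or until CNTVCT_EL0 >= deadline. */
	asm volatile("wfit %0" : : "r"(deadline) : "memory");
}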

◆ nvhe_hyp_panic_handler()

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt, u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar)

Definition at line 386 of file handle_exit.c.

{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
			(void *)(panic_addr + kaslr_offset()));
	}

	/* Dump the nVHE hypervisor backtrace */
	kvm_nvhe_dump_backtrace(hyp_offset);

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}
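The address arithmetic is easier to see with concrete numbers. The values below are made up purely for illustration:

/*
 * elr_virt    = 0xffff800008021234   hyp VA of the panicking instruction
 * elr_in_kimg = 0xffff800011a21234   same instruction in the kernel image map
 * kaslr       = 0x0000000000a00000   kaslr_offset()
 *
 * hyp_offset = elr_in_kimg - kaslr - elr_virt
 * panic_addr = elr_virt + hyp_offset = elr_in_kimg - kaslr
 *
 * So panic_addr is the unslid vmlinux address (suitable for symbol lookup),
 * and adding hyp_offset converts any hyp VA in the backtrace the same way.
 */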

Variable Documentation

◆ arm_exit_handlers

static exit_handle_fn arm_exit_handlers[]
Initial value:
= {
	[0 ... ESR_ELx_EC_MAX]	 = kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	 = kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	 = kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	 = kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	 = kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	 = kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	 = kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	 = kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	 = handle_hvc,
	[ESR_ELx_EC_SMC32]	 = handle_smc,
	[ESR_ELx_EC_HVC64]	 = handle_hvc,
	[ESR_ELx_EC_SMC64]	 = handle_smc,
	[ESR_ELx_EC_SVC64]	 = handle_svc,
	[ESR_ELx_EC_SYS64]	 = kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	 = handle_sve,
	[ESR_ELx_EC_ERET]	 = kvm_handle_eret,
	[ESR_ELx_EC_IABT_LOW]	 = kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	 = kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW] = kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	 = kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	 = kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	 = handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	 = kvm_handle_ptrauth,
}

Definition at line 255 of file handle_exit.c.