exception.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <hyp/adjust_pc.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>

#if !defined (__KVM_NVHE_HYPERVISOR__) && !defined (__KVM_VHE_HYPERVISOR__)
#error Hypervisor code only!
#endif

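/*
 * Read a guest system register. With NV, go through the full accessor,
 * which handles EL1/EL2 register redirection; otherwise prefer the value
 * live in the CPU when the register is currently loaded, and fall back
 * to the in-memory copy.
 */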
static inline u64 __vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val;

	if (unlikely(vcpu_has_nv(vcpu)))
		return vcpu_read_sys_reg(vcpu, reg);
	else if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}

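/* Mirror of __vcpu_read_sys_reg() for writes. */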
static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (unlikely(vcpu_has_nv(vcpu)))
		vcpu_write_sys_reg(vcpu, val, reg);
	else if (!__vcpu_write_sys_reg_to_cpu(val, reg))
		__vcpu_sys_reg(vcpu, reg) = val;
}

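/*
 * Write the SPSR of the exception level being entered. With NV this may
 * be SPSR_EL2; on VHE the guest's SPSR_EL1 is live in hardware, while on
 * nVHE the in-memory copy is written and restored on the next guest entry.
 */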
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      u64 val)
{
	if (unlikely(vcpu_has_nv(vcpu))) {
		if (target_mode == PSR_MODE_EL1h)
			vcpu_write_sys_reg(vcpu, val, SPSR_EL1);
		else
			vcpu_write_sys_reg(vcpu, val, SPSR_EL2);
	} else if (has_vhe()) {
		write_sysreg_el1(val, SYS_SPSR);
	} else {
		__vcpu_sys_reg(vcpu, SPSR_EL1) = val;
	}
}

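/*
 * The AArch32 banked SPSRs (abt/und) are only live in hardware on VHE;
 * on nVHE they are stashed in the vcpu context instead.
 */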
static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_abt);
	else
		vcpu->arch.ctxt.spsr_abt = val;
}

static void __vcpu_write_spsr_und(struct kvm_vcpu *vcpu, u64 val)
{
	if (has_vhe())
		write_sysreg(val, spsr_und);
	else
		vcpu->arch.ctxt.spsr_und = val;
}

/*
 * This performs the exception entry at a given EL (@target_mode), stashing PC
 * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
 * The EL passed to this function *must* be a non-secure, privileged mode with
 * bit 0 being set (PSTATE.SP == 1).
 *
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
			      enum exception_type type)
{
	unsigned long sctlr, vbar, old, new, mode;
	u64 exc_offset;

	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);

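	/*
	 * Select the vector table entry group: same EL using SP_ELx, same
	 * EL using SP_EL0, lower EL in AArch64, or lower EL in AArch32.
	 */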
	if (mode == target_mode)
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
	else if (!(mode & PSR_MODE32_BIT))
		exc_offset = LOWER_EL_AArch64_VECTOR;
	else
		exc_offset = LOWER_EL_AArch32_VECTOR;

	switch (target_mode) {
	case PSR_MODE_EL1h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL1);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
		break;
	case PSR_MODE_EL2h:
		vbar = __vcpu_read_sys_reg(vcpu, VBAR_EL2);
		sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL2);
		__vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL2);
		break;
	default:
		/* Don't do that */
		BUG();
	}

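	/* @type selects sync/IRQ/FIQ/SError within the group chosen above. */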
	*vcpu_pc(vcpu) = vbar + exc_offset + type;

	old = *vcpu_cpsr(vcpu);
	new = 0;

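	// PSTATE.{N,Z,C,V} are inherited from the interrupted context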
	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

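	// PSTATE.TCO is set upon any exception to AArch64 when MTE is
	// implemented, so the handler doesn't take further tag check faults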
	if (kvm_has_mte(kern_hyp_va(vcpu->kvm)))
		new |= PSR_TCO_BIT;

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= target_mode;

	*vcpu_cpsr(vcpu) = new;
	__vcpu_write_spsr(vcpu, target_mode, old);
}

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10. Rows are indexed by
 * the vector offset / 4; the second column gives the return address offset
 * when the exception was taken from Thumb state.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

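/*
 * Emulate an exception entry into one of the AArch32 exception modes:
 * compute the new CPSR, bank SPSR/LR, and branch to the vector.
 */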
static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 sctlr = __vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	u32 return_address;

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);
	return_address = *vcpu_pc(vcpu);
	return_address += return_offsets[vect_offset >> 2][is_thumb];

	/* KVM only enters the ABT and UND modes, so only deal with those */
	switch (mode) {
	case PSR_AA32_MODE_ABT:
		__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
		break;

	case PSR_AA32_MODE_UND:
		__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
		vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
		break;
	}

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += __vcpu_read_sys_reg(vcpu, VBAR_EL1);

	*vcpu_pc(vcpu) = vect_offset;
}

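/* Turn the vcpu's pending exception flag into an actual exception entry. */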
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu)) {
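		/*
		 * The vector offsets (4, 12, 16) are the AArch32 Undefined,
		 * Prefetch Abort and Data Abort vectors respectively.
		 */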
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA32_UND):
			enter_exception32(vcpu, PSR_AA32_MODE_UND, 4);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_IABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 12);
			break;
		case unpack_vcpu_flag(EXCEPT_AA32_DABT):
			enter_exception32(vcpu, PSR_AA32_MODE_ABT, 16);
			break;
		default:
			/* Err... */
			break;
		}
	} else {
		switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
		case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_sync);
			break;

		case unpack_vcpu_flag(EXCEPT_AA64_EL2_IRQ):
			enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
			break;

		default:
			/*
			 * Only EL1_SYNC and EL2_{SYNC,IRQ} make sense so
			 * far. Everything else gets silently ignored.
			 */
			break;
		}
	}
}

/*
 * Adjust the guest PC (and potentially exception state) depending on
 * flags provided by the emulation code.
 */
void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
{
	if (vcpu_get_flag(vcpu, PENDING_EXCEPTION)) {
		kvm_inject_exception(vcpu);
		vcpu_clear_flag(vcpu, PENDING_EXCEPTION);
		vcpu_clear_flag(vcpu, EXCEPT_MASK);
	} else if (vcpu_get_flag(vcpu, INCREMENT_PC)) {
		kvm_skip_instr(vcpu);
		vcpu_clear_flag(vcpu, INCREMENT_PC);
	}
}