KVM
Macros | Functions
inject_fault.c File Reference
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>
#include <asm/esr.h>
Include dependency graph for inject_fault.c:

Go to the source code of this file.

Macros

#define DFSR_FSC_EXTABT_LPAE   0x10
 
#define DFSR_FSC_EXTABT_nLPAE   0x08
 
#define DFSR_LPAE   BIT(9)
 
#define TTBCR_EAE   BIT(31)
 

Functions

static void pend_sync_exception (struct kvm_vcpu *vcpu)
 
static bool match_target_el (struct kvm_vcpu *vcpu, unsigned long target)
 
static void inject_abt64 (struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 
static void inject_undef64 (struct kvm_vcpu *vcpu)
 
static void inject_undef32 (struct kvm_vcpu *vcpu)
 
static void inject_abt32 (struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
 
void kvm_inject_dabt (struct kvm_vcpu *vcpu, unsigned long addr)
 
void kvm_inject_pabt (struct kvm_vcpu *vcpu, unsigned long addr)
 
void kvm_inject_size_fault (struct kvm_vcpu *vcpu)
 
void kvm_inject_undefined (struct kvm_vcpu *vcpu)
 
void kvm_set_sei_esr (struct kvm_vcpu *vcpu, u64 esr)
 
void kvm_inject_vabt (struct kvm_vcpu *vcpu)
 

Macro Definition Documentation

◆ DFSR_FSC_EXTABT_LPAE

#define DFSR_FSC_EXTABT_LPAE   0x10

Definition at line 114 of file inject_fault.c.

◆ DFSR_FSC_EXTABT_nLPAE

#define DFSR_FSC_EXTABT_nLPAE   0x08

Definition at line 115 of file inject_fault.c.

◆ DFSR_LPAE

#define DFSR_LPAE   BIT(9)

Definition at line 116 of file inject_fault.c.

◆ TTBCR_EAE

#define TTBCR_EAE   BIT(31)

Definition at line 117 of file inject_fault.c.

Function Documentation

◆ inject_abt32()

/*
 * inject_abt32 - make an external abort pending for an AArch32 EL1 guest
 * @vcpu:    The VCPU to receive the abort
 * @is_pabt: true for a prefetch (instruction) abort, false for a data abort
 * @addr:    The 32-bit faulting address to report (IFAR/DFAR view)
 *
 * Give the guest an IMPLEMENTATION DEFINED external abort: build a short-
 * or long-descriptor FSR depending on TTBCR.EAE, pend the right AArch32
 * exception, and merge @addr into the half of FAR_EL1 that maps onto the
 * 32-bit IFAR (bits [63:32]) or DFAR (bits [31:0]).
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
{
	u64 far;
	u32 fsr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
		fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		fsr = DFSR_FSC_EXTABT_nLPAE;
	}

	far = vcpu_read_sys_reg(vcpu, FAR_EL1);

	if (is_pabt) {
		kvm_pend_exception(vcpu, EXCEPT_AA32_IABT);
		/* IFAR maps onto FAR_EL1[63:32] */
		far &= GENMASK(31, 0);
		far |= (u64)addr << 32;
		vcpu_write_sys_reg(vcpu, fsr, IFSR32_EL2);
	} else { /* !iabt */
		kvm_pend_exception(vcpu, EXCEPT_AA32_DABT);
		/* DFAR maps onto FAR_EL1[31:0], DFSR onto ESR_EL1 */
		far &= GENMASK(63, 32);
		far |= addr;
		vcpu_write_sys_reg(vcpu, fsr, ESR_EL1);
	}

	vcpu_write_sys_reg(vcpu, far, FAR_EL1);
}
#define TTBCR_EAE
Definition: inject_fault.c:117
#define DFSR_FSC_EXTABT_LPAE
Definition: inject_fault.c:114
#define DFSR_LPAE
Definition: inject_fault.c:116
#define DFSR_FSC_EXTABT_nLPAE
Definition: inject_fault.c:115
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
Definition: sys_regs.c:128
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
Definition: sys_regs.c:172
Here is the call graph for this function:
Here is the caller graph for this function:

◆ inject_abt64()

/*
 * inject_abt64 - make a synchronous external abort pending for an AArch64 guest
 * @vcpu:    The VCPU to receive the abort
 * @is_iabt: true for an instruction abort, false for a data abort
 * @addr:    The faulting address to report in FAR_ELx
 *
 * Build an {i,d}abort syndrome reporting a synchronous external abort,
 * pend the exception, and latch ESR/FAR into the registers of whichever
 * EL the exception has been routed to.
 */
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool from_lower_el;
	u64 esr = ESR_ELx_FSC_EXTABT;

	pend_sync_exception(vcpu);

	/* Mirror the instruction length of the trapped instruction. */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 * Either way the abort is reported as taken from a lower EL.
	 */
	from_lower_el = vcpu_mode_is_32bit(vcpu) ||
			(cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t;

	if (is_iabt)
		esr |= (u64)(from_lower_el ? ESR_ELx_EC_IABT_LOW :
					     ESR_ELx_EC_IABT_CUR) << ESR_ELx_EC_SHIFT;
	else
		esr |= (u64)(from_lower_el ? ESR_ELx_EC_DABT_LOW :
					     ESR_ELx_EC_DABT_CUR) << ESR_ELx_EC_SHIFT;

	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
	} else {
		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
	}
}
static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
Definition: inject_fault.c:52
static void pend_sync_exception(struct kvm_vcpu *vcpu)
Definition: inject_fault.c:18
Here is the call graph for this function:
Here is the caller graph for this function:

◆ inject_undef32()

static void inject_undef32 ( struct kvm_vcpu *  vcpu)
static

Definition at line 119 of file inject_fault.c.

120 {
121  kvm_pend_exception(vcpu, EXCEPT_AA32_UND);
122 }
Here is the caller graph for this function:

◆ inject_undef64()

static void inject_undef64 ( struct kvm_vcpu *  vcpu)
static

Definition at line 95 of file inject_fault.c.

96 {
97  u64 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
98 
99  pend_sync_exception(vcpu);
100 
101  /*
102  * Build an unknown exception, depending on the instruction
103  * set.
104  */
105  if (kvm_vcpu_trap_il_is32bit(vcpu))
106  esr |= ESR_ELx_IL;
107 
108  if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
109  vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
110  else
111  vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
112 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_inject_dabt()

void kvm_inject_dabt ( struct kvm_vcpu *  vcpu,
unsigned long  addr 
)

kvm_inject_dabt - inject a data abort into the guest @vcpu: The VCPU to receive the data abort @addr: The address to report in the DFAR

It is assumed that this code is called from the VCPU thread and that the VCPU therefore is not currently executing guest code.

Definition at line 166 of file inject_fault.c.

167 {
168  if (vcpu_el1_is_32bit(vcpu))
169  inject_abt32(vcpu, false, addr);
170  else
171  inject_abt64(vcpu, false, addr);
172 }
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
Definition: inject_fault.c:128
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
Definition: inject_fault.c:57
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_inject_pabt()

void kvm_inject_pabt ( struct kvm_vcpu *  vcpu,
unsigned long  addr 
)

kvm_inject_pabt - inject a prefetch abort into the guest @vcpu: The VCPU to receive the prefetch abort @addr: The address to report in the DFAR

It is assumed that this code is called from the VCPU thread and that the VCPU therefore is not currently executing guest code.

Definition at line 182 of file inject_fault.c.

183 {
184  if (vcpu_el1_is_32bit(vcpu))
185  inject_abt32(vcpu, true, addr);
186  else
187  inject_abt64(vcpu, true, addr);
188 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_inject_size_fault()

void kvm_inject_size_fault ( struct kvm_vcpu *  vcpu)

Definition at line 190 of file inject_fault.c.

191 {
192  unsigned long addr, esr;
193 
194  addr = kvm_vcpu_get_fault_ipa(vcpu);
195  addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
196 
197  if (kvm_vcpu_trap_is_iabt(vcpu))
198  kvm_inject_pabt(vcpu, addr);
199  else
200  kvm_inject_dabt(vcpu, addr);
201 
202  /*
203  * If AArch64 or LPAE, set FSC to 0 to indicate an Address
204  * Size Fault at level 0, as if exceeding PARange.
205  *
206  * Non-LPAE guests will only get the external abort, as there
207  * is no way to describe the ASF.
208  */
209  if (vcpu_el1_is_32bit(vcpu) &&
210  !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
211  return;
212 
213  esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
214  esr &= ~GENMASK_ULL(5, 0);
215  vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
216 }
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
Definition: inject_fault.c:166
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
Definition: inject_fault.c:182
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_inject_undefined()

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The vCPU in which to inject the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	/* Pick the injector matching the guest's EL1 execution state. */
	if (vcpu_el1_is_32bit(vcpu))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
static void inject_undef64(struct kvm_vcpu *vcpu)
Definition: inject_fault.c:95
static void inject_undef32(struct kvm_vcpu *vcpu)
Definition: inject_fault.c:119
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_inject_vabt()

void kvm_inject_vabt ( struct kvm_vcpu *  vcpu)

kvm_inject_vabt - inject an async abort / SError into the guest @vcpu: The VCPU to receive the exception

It is assumed that this code is called from the VCPU thread and that the VCPU therefore is not currently executing guest code.

Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with the remaining ISS all-zeros so that this error is not interpreted as an uncategorized RAS error. Without the RAS Extensions we can't specify an ESR value, so the CPU generates an imp-def value.

Definition at line 251 of file inject_fault.c.

252 {
253  kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
254 }
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
Definition: inject_fault.c:233
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_set_sei_esr()

/*
 * kvm_set_sei_esr - latch a virtual SError syndrome and pend the SError
 * @vcpu: The VCPU to receive the SError
 * @esr:  Syndrome value; only the ISS field is retained in VSESR_EL2
 *
 * Setting HCR_EL2.VSE makes the virtual SError pending for the guest.
 */
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}
Here is the caller graph for this function:

◆ match_target_el()

static bool match_target_el ( struct kvm_vcpu *  vcpu,
unsigned long  target 
)
static

Definition at line 52 of file inject_fault.c.

53 {
54  return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
55 }
Here is the caller graph for this function:

◆ pend_sync_exception()

static void pend_sync_exception ( struct kvm_vcpu *  vcpu)
static

Definition at line 18 of file inject_fault.c.

19 {
20  /* If not nesting, EL1 is the only possible exception target */
21  if (likely(!vcpu_has_nv(vcpu))) {
22  kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
23  return;
24  }
25 
26  /*
27  * With NV, we need to pick between EL1 and EL2. Note that we
28  * never deal with a nesting exception here, hence never
29  * changing context, and the exception itself can be delayed
30  * until the next entry.
31  */
32  switch(*vcpu_cpsr(vcpu) & PSR_MODE_MASK) {
33  case PSR_MODE_EL2h:
34  case PSR_MODE_EL2t:
35  kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
36  break;
37  case PSR_MODE_EL1h:
38  case PSR_MODE_EL1t:
39  kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
40  break;
41  case PSR_MODE_EL0t:
42  if (vcpu_el2_tge_is_set(vcpu))
43  kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
44  else
45  kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
46  break;
47  default:
48  BUG();
49  }
50 }
Here is the caller graph for this function: