#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/kvm_host.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>
/* In bad_trap(): */
	WARN_ONCE(1, "Unexpected %s\n", msg);

/* In read_from_write_only(): */
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");

/* In write_to_read_only(): */
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
#define PURE_EL2_SYSREG(el2)						\
	case el2: { *el1r = el2; return true; }

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: { *xlate = fn; *el1r = el1; return true; }

static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
	MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1, translate_sctlr_el2_to_sctlr_el1);
	MAPPED_EL2_SYSREG(CPTR_EL2,  CPACR_EL1, translate_cptr_el2_to_cpacr_el1);
	MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1, translate_ttbr0_el2_to_ttbr0_el1);
	MAPPED_EL2_SYSREG(TCR_EL2,   TCR_EL1,   translate_tcr_el2_to_tcr_el1);
	/* ... */
	default:
		return false;
	}
}
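/*
 * Background (summarising, not adding code): with nested virt, a guest
 * running at virtual EL2 has its EL2 system registers backed by the EL1
 * hardware registers. PURE_EL2_SYSREG() entries have no EL1 counterpart,
 * while MAPPED_EL2_SYSREG() entries name the EL1 register they live in,
 * plus an optional translate function for registers whose layout differs
 * between EL2 and EL1 (e.g. CPTR_EL2 vs CPACR_EL1).
 */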
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * A non-VHE guest hypervisor needs its sysregs translated
		 * for EL1, so use the in-memory copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		return val;
	}

	/* EL1 registers can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/* Always keep the in-memory copy up to date. */
		__vcpu_sys_reg(vcpu, reg) = val;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 registers can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
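/*
 * The pattern in both accessors: while the vCPU is loaded
 * (SYSREGS_ON_CPU), most EL1 state lives in the real hardware registers
 * and is accessed directly; otherwise, or when no hardware register
 * currently backs the value, it is read from / written to the vcpu's
 * in-memory sysreg array via __vcpu_sys_reg().
 */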
#define CSSELR_MAX 14

/* In get_min_cache_line_size(), which returns log2(bytes): */
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

/* In get_ccsidr(), preferring a userspace-provided CCSIDR: */
	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	/* Otherwise fabricate a value reporting only the line size: */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
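/*
 * Worked example (illustrative): CCSIDR_EL1.LineSize holds
 * log2(bytes per line) - 4, so a 64-byte line gives line_size = 6 and a
 * stored LineSize field of 2; set_ccsidr() below recovers the 6 by adding
 * the 4 back.
 */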
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32),
				       GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;
		/* ... (pre-populate with the current fabricated values) ... */
		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;
	return 0;
}
/* In access_dcsw(): with FWB there is nothing to do for set/way ops. */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

/* In access_dcgsw(): MTE set/way ops UNDEF without MTE. */
	if (!kvm_has_mte(vcpu->kvm)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

/* In get_access_mask(): pick the half a 32-bit alias maps to. */
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);	*shift = 0;	break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);	*shift = 32;	break;
	default:
		*mask = GENMASK_ULL(63, 0);	*shift = 0;	break;
/* In access_vm_reg(): */
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	val |= (p->regval & (mask >> shift)) << shift;

/* In access_gic_sre(): the guest can only read the SRE state. */
	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
/* In trap_loregion(): LORegions UNDEF unless advertised in MMFR1. */
	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
	u32 sr = reg_to_encoding(r);

	if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);
/* In trap_oslar_el1(): forward the OS Lock bit into OSLSR_EL1. */
	oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
	if (p->regval & OSLAR_EL1_OSLK)
		oslsr |= OSLSR_EL1_OSLK;

	__vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
/* In trap_oslsr_el1(): */
	p->regval = __vcpu_sys_reg(vcpu, r->reg);

/* In set_oslsr_el1(): userspace may only toggle the OSLK bit. */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;

/* In trap_dbgauthstatus_el1(): */
	p->regval = read_sysreg(dbgauthstatus_el1);
/* In trap_debug_regs(): a write makes the debug state dirty. */
	vcpu_set_flag(vcpu, DEBUG_DIRTY);

/* In reg_to_dbg(): */
	u64 mask, shift, val;

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_set_flag(vcpu, DEBUG_DIRTY);

/* In dbg_to_reg(): */
	p->regval = (*dbg_reg & mask) >> shift;
/* In trap_bvr(): */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

/* set_bvr(), get_bvr(), reset_bvr(): */
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
	*val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;

/* In trap_bcr(): */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

/* set_bcr(), get_bcr(), reset_bcr(): */
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
	*val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;

/* In trap_wvr(): */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

	trace_trap_reg(__func__, rd->CRm, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

/* set_wvr(), get_wvr(), reset_wvr(): */
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
	*val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;

/* In trap_wcr(): */
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

	trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

/* set_wcr(), get_wcr(), reset_wcr(): */
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
	*val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
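/*
 * All four register families use the same trick: the sys_reg_desc for
 * DBGBVRn/DBGBCRn/DBGWVRn/DBGWCRn carries the breakpoint/watchpoint
 * index n in its CRm field (see DBG_BCR_BVR_WCR_WVR_EL1() below), so a
 * single handler per family can index the right slot via rd->CRm.
 */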
/* reset_amair_el1() and reset_actlr() mirror the host values: */
	u64 amair = read_sysreg(amair_el1);
	u64 actlr = read_sysreg(actlr_el1);

/* In reset_mpidr(): fabricate affinity levels from the vcpu_id. */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
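/*
 * Worked example (illustrative): vcpu_id 0x1234 becomes Aff0 = 0x4,
 * Aff1 = 0x23 and Aff2 = 0x01, i.e. clusters of at most 16 vCPUs at
 * affinity level 0; bit 31 is the RES1 bit of MPIDR_EL1.
 */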
/* In reset_pmu_reg(): mask per-counter bits to the implemented counters. */
	u8 n = vcpu->kvm->arch.pmcr_n;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_sys_reg(vcpu, r->reg) &= mask;

	return __vcpu_sys_reg(vcpu, r->reg);

/* In reset_pmevcntr(): event counters are 32 bits wide. */
	__vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
	return __vcpu_sys_reg(vcpu, r->reg);

/* In reset_pmevtyper(): */
	return __vcpu_sys_reg(vcpu, r->reg);

/* In reset_pmselr(): */
	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
	return __vcpu_sys_reg(vcpu, r->reg);
/* In reset_pmcr(): force 64-bit cycle counting without 32-bit EL0. */
	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = pmcr;
	return __vcpu_sys_reg(vcpu, r->reg);
/* In check_pmu_access_disabled(): EL0 access hinges on PMUSERENR_EL0. */
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
/* In access_pmcr(), write side: only writable PMCR bits are updated. */
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);

	/* Read side: PMCR.P and PMCR.C read as zero. */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);

/* In access_pmselr(): */
	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;
/* In access_pmceid(): */
	u64 pmceid, mask, shift;

/* In pmu_counter_idx_valid(): N comes from the guest's PMCR_EL0. */
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
/* In get_pmu_evcntr(): */
	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

/* In access_pmu_evcntr(): decode which counter is being accessed. */
	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0: counter selected by PMSELR */
			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR (AArch32) */
		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding failure */
	WARN_ON(idx == ~0UL);
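/*
 * The ((CRm & 3) << 3) | (Op2 & 7) decode works because the PMEVCNTRn_EL0
 * encodings pack n into the bottom two bits of CRm and all three bits of
 * Op2: e.g. n = 13 sits at CRm = 0b1001, Op2 = 0b101, and
 * ((0b1001 & 3) << 3) | (0b101 & 7) = 8 + 5 = 13.
 */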
/* In access_pmu_evtyper(): pick the event type register to emulate. */
	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	}

	p->regval = __vcpu_sys_reg(vcpu, reg);
/* In set_pmreg(): set/clear semantics shared by the PM* SET/CLR pairs. */
	if (set)
		__vcpu_sys_reg(vcpu, r->reg) |= val;
	else
		__vcpu_sys_reg(vcpu, r->reg) &= ~val;

/* In get_pmreg(): */
	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;

/* In access_pmcnten(): */
	if (p->is_write) {
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}
/* In access_pminten(): */
	u64 val = p->regval & mask;

	if (r->Op2 & 0x1)
		/* accessing PMINTENSET_EL1 */
		__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
	else
		/* accessing PMINTENCLR_EL1 */
		__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;

	p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);

/* In access_pmovs(): */
	if (r->CRm & 0x2)
		/* accessing PMOVSSET_EL0 */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
	else
		/* accessing PMOVSCLR_EL0 */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);

	p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
/* In access_pmuserenr(): only privileged guest code may write it. */
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}
/* In set_pmcr(): userspace may shrink PMCR_EL0.N before first run. */
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.pmcr_n = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/* Only the writable PMCR bits are kept. */
	val &= ARMV8_PMU_PMCR_MASK;
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_sys_reg(vcpu, r->reg) = val;
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
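/*
 * Expansion sketch (illustrative): DBG_BCR_BVR_WCR_WVR_EL1(1) emits the
 * four descriptors for DBGBVR1_EL1, DBGBCR1_EL1, DBGWVR1_EL1 and
 * DBGWCR1_EL1. The positional 0, 0 initialisers leave .reg/.val zero,
 * and the register encoding's CRm field (here 1) is what the handlers
 * read back through rd->CRm to find the right debug slot.
 */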
#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
/* In ptrauth_visibility(): */
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;

#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	  .visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
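/*
 * Expansion sketch (illustrative): PTRAUTH_KEY(APIA) yields the two
 * descriptors for APIAKEYLO_EL1 and APIAKEYHI_EL1. The key registers are
 * hidden entirely when the vCPU lacks pointer authentication, and
 * undef_access covers the case where an access traps anyway (the keys
 * are otherwise context-switched rather than trapped).
 */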
/* In access_arch_timer(): map the encoding onto a timer register. */
	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
	case SYS_AARCH32_CNTP_TVAL:
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTP_CTL_EL0:
	case SYS_AARCH32_CNTP_CTL:
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTP_CVAL_EL0:
	case SYS_AARCH32_CNTP_CVAL:
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
	case SYS_AARCH32_CNTPCT:
		treg = TIMER_REG_CNT;
		break;

	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		kvm_inject_undefined(vcpu);
		return false;
	}
/* In kvm_arm64_ftr_safe_value(): */
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have a different safe-value type in KVM than on the host. */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
/* In arm64_check_features(): */
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/* A RAZ register can only hold zero. */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		/* Only writable fields are validated against the limit. */
		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* Non-writable fields must match the limit exactly. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;
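/*
 * In short: a userspace write to a writable ID register is accepted only
 * when every writable field is a "safe" (usually lower-or-equal) value
 * relative to the register's reset limit, and every non-writable bit
 * matches the limit exactly; anything else fails with -E2BIG.
 */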
/* In pmuver_to_perfmon(): */
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
/* In __kvm_read_sanitised_id_reg(): */
	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT))
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}
static bool is_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) < 8);
}

static bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}
/* In id_visibility(): SVE's ID register disappears without SVE. */
	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

/* In aa32_id_visibility(): RAZ/WI when 32-bit EL0 isn't supported. */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

/* In sve_visibility(): */
	if (vcpu_has_sve(vcpu))
		return 0;

/* In read_sanitised_id_aa64pfr0_el1(): */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/* Advertise CSV2/CSV3 when the host is unaffected. */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	/* Expose a sysreg-accessible GIC (when a GICv3 is in use): */
	val &= ~ID_AA64PFR0_EL1_GIC_MASK;
	val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);

	/* The AMU is never exposed to guests. */
	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)			\
({									\
	u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);		\
	(val) &= ~reg##_##field##_MASK;					\
	(val) |= FIELD_PREP(reg##_##field##_MASK,			\
			    min(__f_val, (u64)reg##_##field##_##limit));\
	(val);								\
})
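/*
 * Usage sketch (illustrative): the file applies this as, e.g.,
 *
 *	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
 *
 * which token-pastes ID_AA64DFR0_EL1_DebugVer_MASK and
 * ID_AA64DFR0_EL1_DebugVer_V8P8, clamping the DebugVer field so the guest
 * is never advertised more than the stated limit.
 */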
/* In read_sanitised_id_aa64dfr0_el1(): */
	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);

	/* Only advertise a PMU if the vCPU was configured with one. */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests. */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

/* In set_id_aa64dfr0_el1(): */
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/* Writing IMP_DEF is treated as "no PMU" for compatibility. */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/* The architecture mandates at least v8.0 of the debug feature. */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;
/* In read_sanitised_id_dfr0_el1(): */
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);

/* In set_id_dfr0_el1(): */
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/* Anything between "no PMU" and PMUv3 is rejected. */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;
/* In get_id_reg(): skip the lock once the ID registers are invariant. */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

/* In set_id_reg(): */
	mutex_lock(&vcpu->kvm->arch.config_lock);

	/* Once the VM has started, only identical rewrites are allowed. */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		ret = (val == read_id_reg(vcpu, rd)) ? 0 : -EBUSY;
		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		IDREG(vcpu->kvm, id) = val;

	mutex_unlock(&vcpu->kvm->arch.config_lock);
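/*
 * The locking story for ID registers, condensed: they are mutable (under
 * config_lock, and subject to arm64_check_features()) only until the VM
 * first runs; after that they are invariant, reads need no lock, and the
 * only accepted "write" is one that restates the current value.
 */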
/* In access_ctr(): */
	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);

/* In access_actlr(), read side: */
	p->regval = __vcpu_sys_reg(vcpu, r->reg);
/* In reset_clidr(): fabricate a cache hierarchy from CTR_EL0. */
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * No clean to PoU is needed, so no LoUU/LoUIS; a unified
		 * cache marks the LoC. Leave room at L1 for an icache if
		 * DIC is not set.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/* Clean to PoU is required: give L1 a data cache. */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/* I-cache invalidation to PoU required: give L1 an icache too. */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/* With MTE, tag storage is unified with the data cache at LoC. */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_sys_reg(vcpu, r->reg) = clidr;

	return __vcpu_sys_reg(vcpu, r->reg);
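/*
 * Why fabricate at all: the idea is that the guest should see a
 * CLIDR_EL1 consistent with the IDC/DIC bits of the CTR_EL0 that KVM
 * exposes, rather than the host's real cache topology, which may differ
 * across the machines a VM migrates between.
 */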
/* In set_clidr(): userspace may not claim IDC if CTR_EL0 says otherwise. */
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_sys_reg(vcpu, rd->reg) = val;
/* In access_ccsidr(): only Level and InD select the cache. */
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;

/* In mte_visibility(): */
	if (kvm_has_mte(vcpu->kvm))
		return 0;
#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}
/* In el2_visibility(): EL2 registers only exist with nested virt. */
	if (vcpu_has_nv(vcpu))
		return 0;

/* bad_vncr_trap() and bad_redir_trap() reuse bad_trap(): */
	return bad_trap(vcpu, p, r, "trap of VNCR-backed register");
	return bad_trap(vcpu, p, r, "trap of EL2 register redirected to EL1");
#define EL2_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name),			\
	.access = acc,				\
	.reset = rst,				\
	.reg = name,				\
	.visibility = el2_visibility,		\
	.val = v,				\
}

#define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)

#define EL12_REG(name, acc, rst, v) {		\
	SYS_DESC(SYS_##name##_EL12),		\
	.access = acc,				\
	.reset = rst,				\
	.reg = name##_EL1,			\
	.val = v,				\
	.visibility = hidden_user_visibility,	\
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg			\

#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known AArch32 cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = aa32_id_visibility,	\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = mask,				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * registers with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2.
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_id_reg,			\
	.get_user = get_id_reg,				\
	.set_user = set_id_reg,				\
	.visibility = raz_visibility,			\
	.reset = kvm_read_sanitised_id_reg,		\
	.val = 0,					\
}

/* sys_reg_desc initialiser for known ID registers hidden from guests */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.set_user = set_id_reg,			\
	.visibility = raz_visibility,		\
	.reset = kvm_read_sanitised_id_reg,	\
	.val = 0,				\
}
/* In access_sp_el1(): */
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

/* In access_spsr(): */
	if (p->is_write)
		__vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
	/* From the sys_reg_descs[] ID-register block: */

	/* ID_DFR0_EL1, with only PerfMon and CopDbg writable: */
	  .val = ID_DFR0_EL1_PerfMon_MASK |
		 ID_DFR0_EL1_CopDbg_MASK, },

	/* ID_AA64PFR0_EL1, everything writable bar the listed fields: */
	  .val = ~(ID_AA64PFR0_EL1_AMU |
		   ID_AA64PFR0_EL1_MPAM |
		   ID_AA64PFR0_EL1_SVE |
		   ID_AA64PFR0_EL1_RAS |
		   ID_AA64PFR0_EL1_GIC |
		   ID_AA64PFR0_EL1_AdvSIMD |
		   ID_AA64PFR0_EL1_FP), },

	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),

	/* ID_AA64DFR0_EL1, with only PMUVer and DebugVer writable: */
	  .val = ID_AA64DFR0_EL1_PMUVer_MASK |
		 ID_AA64DFR0_EL1_DebugVer_MASK, },

	ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
	ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
					ID_AA64ISAR1_EL1_GPA |
					ID_AA64ISAR1_EL1_API |
					ID_AA64ISAR1_EL1_APA)),
	ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
					ID_AA64ISAR2_EL1_APA3 |
					ID_AA64ISAR2_EL1_GPA3)),
	ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
					ID_AA64MMFR0_EL1_TGRAN4_2 |
					ID_AA64MMFR0_EL1_TGRAN64_2 |
					ID_AA64MMFR0_EL1_TGRAN16_2)),
	ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
					ID_AA64MMFR1_EL1_HCX |
					ID_AA64MMFR1_EL1_XNX |
					ID_AA64MMFR1_EL1_TWED |
					ID_AA64MMFR1_EL1_VH |
					ID_AA64MMFR1_EL1_VMIDBits)),
	ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
					ID_AA64MMFR2_EL1_EVT |
					ID_AA64MMFR2_EL1_FWB |
					ID_AA64MMFR2_EL1_IDS |
					ID_AA64MMFR2_EL1_NV |
					ID_AA64MMFR2_EL1_CCIDX)),
	/* PMUSERENR_EL0 and PMCCFILTR_EL0 entries: */
	{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },

	{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
/* In trap_dbgdidr(): synthesise AArch32 DBGDIDR from the AArch64 IDs. */
	u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
	u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
	u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);

	p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
		     (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
		     (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
		     (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
		     (1 << 15) | (el3 << 14) | (el3 << 12));
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)			\
	AA32(_map),							\
	Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,				\
	  (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),			\
	  .access = access_pmu_evtyper }
/* In check_sysreg_table(): */
	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n",
				&table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n",
				&table[i - 1], i - 1);
			return false;
		}
	}
/* In perform_access(): */
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Skip the instruction if the access handler says so. */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
/* In unhandled_cp_access(): */
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
/* In kvm_handle_cp_64(): */
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);
	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/* Build a 64-bit value from the Rt/Rt2 pair on the write side. */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side. */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}
		return 1;
	}
/* In kvm_esr_cp10_id_to_sys64(): VMRS accesses to the cp10 ID regs. */
	u8 reg_id = (esr >> 10) & 0xf;

	params->is_write = ((esr & 1) == 0);
	/* ... */
	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
/* In kvm_handle_cp10_id(): */
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);

	vcpu_set_reg(vcpu, Rt, params.regval);
/* In kvm_emulate_cp15_id_reg(): */
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* CRm values above 3 in the rerouted ID space read as zero. */
	if (params->CRm > 3)
		params->regval = 0;

	vcpu_set_reg(vcpu, Rt, params->regval);
/* In kvm_handle_cp_32(): */
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}
	/* In kvm_handle_cp15_32(): reroute AArch32 ID regs (CRm != 0). */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);
/* In is_imp_def_sys_reg(): */
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
/* In emulate_sys_reg(), when no descriptor matches: */
	print_sys_reg_msg(params,
			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
/* In kvm_reset_id_regs(): run every ID register's reset once per VM. */
	struct kvm *kvm = vcpu->kvm;
	const struct sys_reg_desc *idreg = first_idreg;
	u32 id = reg_to_encoding(idreg);

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	lockdep_assert_held(&kvm->arch.config_lock);

	while (is_id_reg(id)) {
		IDREG(kvm, id) = idreg->reset(vcpu, idreg);
		idreg++;
		id = reg_to_encoding(idreg);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
/* In kvm_handle_sys_reg(): */
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
/* In index_to_params(): decode a KVM_GET/SET_ONE_REG index. */
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;

		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
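/*
 * index_to_params() and sys_reg_to_index() further below are exact
 * inverses: a userspace register index is KVM_REG_ARM64 |
 * KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG with Op0/Op1/CRn/CRm/Op2
 * packed into the low bits, so the five encoding fields round-trip
 * verbatim through the index.
 */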
/* In get_reg_by_id(): */
	return find_reg(&params, table, num);

/* In id_to_sys_reg_desc(): we only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;
#define FUNCTION_INVARIANT(reg)						\
	static u64 get_##reg(struct kvm_vcpu *v,			\
			     const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
		return ((struct sys_reg_desc *)r)->val;			\
	}

/* In get_ctr_el0(): CTR_EL0 uses the sanitised view instead. */
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);

/* ->val is filled in by kvm_sys_reg_table_init(): */
static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	/* ... */
};

/* In get_invariant_sys_reg(): */
	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);

/* In set_invariant_sys_reg(): */
	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
/* In demux_c15_get(): */
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}

/* demux_c15_set() follows the same shape: */
	u32 val, newval;
	u32 __user *uval = uaddr;

	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
/* In kvm_sys_reg_get_user(): */
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

/* In kvm_arm_sys_reg_get_reg(): demux registers take their own path. */
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

/* In kvm_sys_reg_set_user(): */
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

/* In kvm_arm_sys_reg_set_reg(): */
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);
/* In write_demux_regids(): */
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
/* walk_one_sys_reg(..., u64 __user **uind, unsigned int *total) */

/* In walk_sys_regs(): walk every descriptor in sys_reg_descs[]. */
	unsigned int total = 0;

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}

/* In kvm_arm_num_sys_reg_descs(): */
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);

/* In kvm_arm_copy_sys_reg_indices(): */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
				     sys_reg_Op1(r),		\
				     sys_reg_CRn(r),		\
				     sys_reg_CRm(r),		\
				     sys_reg_Op2(r))

static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}
/* In kvm_vm_ioctl_get_reg_writable_masks(): */
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only the feature ID range is supported; reserved must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first. */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;
/* In kvm_sys_reg_table_init(): */
	valid &= check_sysreg_table(invariant_sys_regs,
				    ARRAY_SIZE(invariant_sys_regs), false);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);