KVM
pgtable.c File Reference
#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

Classes

struct  kvm_pgtable_walk_data
 
struct  leaf_walk_data
 
struct  hyp_map_data
 
struct  stage2_map_data
 
struct  stage2_attr_data
 
struct  stage2_age_data
 

Macros

#define KVM_PTE_TYPE   BIT(1)
 
#define KVM_PTE_TYPE_BLOCK   0
 
#define KVM_PTE_TYPE_PAGE   1
 
#define KVM_PTE_TYPE_TABLE   1
 
#define KVM_PTE_LEAF_ATTR_LO   GENMASK(11, 2)
 
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX   GENMASK(4, 2)
 
#define KVM_PTE_LEAF_ATTR_LO_S1_AP   GENMASK(7, 6)
 
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO    ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
 
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW    ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
 
#define KVM_PTE_LEAF_ATTR_LO_S1_SH   GENMASK(9, 8)
 
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3
 
#define KVM_PTE_LEAF_ATTR_LO_S1_AF   BIT(10)
 
#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR   GENMASK(5, 2)
 
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R   BIT(6)
 
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W   BIT(7)
 
#define KVM_PTE_LEAF_ATTR_LO_S2_SH   GENMASK(9, 8)
 
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3
 
#define KVM_PTE_LEAF_ATTR_LO_S2_AF   BIT(10)
 
#define KVM_PTE_LEAF_ATTR_HI   GENMASK(63, 50)
 
#define KVM_PTE_LEAF_ATTR_HI_SW   GENMASK(58, 55)
 
#define KVM_PTE_LEAF_ATTR_HI_S1_XN   BIT(54)
 
#define KVM_PTE_LEAF_ATTR_HI_S2_XN   BIT(54)
 
#define KVM_PTE_LEAF_ATTR_HI_S1_GP   BIT(50)
 
#define KVM_PTE_LEAF_ATTR_S2_PERMS
 
#define KVM_INVALID_PTE_OWNER_MASK   GENMASK(9, 2)
 
#define KVM_MAX_OWNER_ID   1
 
#define KVM_INVALID_PTE_LOCKED   BIT(10)
 
#define KVM_S2_MEMATTR(pgt, attr)   PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
 

Functions

static bool kvm_pgtable_walk_skip_bbm_tlbi (const struct kvm_pgtable_visit_ctx *ctx)
 
static bool kvm_pgtable_walk_skip_cmo (const struct kvm_pgtable_visit_ctx *ctx)
 
static bool kvm_phys_is_valid (u64 phys)
 
static bool kvm_block_mapping_supported (const struct kvm_pgtable_visit_ctx *ctx, u64 phys)
 
static u32 kvm_pgtable_idx (struct kvm_pgtable_walk_data *data, s8 level)
 
static u32 kvm_pgd_page_idx (struct kvm_pgtable *pgt, u64 addr)
 
static u32 kvm_pgd_pages (u32 ia_bits, s8 start_level)
 
static bool kvm_pte_table (kvm_pte_t pte, s8 level)
 
static kvm_pte_t * kvm_pte_follow (kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
 
static void kvm_clear_pte (kvm_pte_t *ptep)
 
static kvm_pte_t kvm_init_table_pte (kvm_pte_t *childp, struct kvm_pgtable_mm_ops *mm_ops)
 
static kvm_pte_t kvm_init_valid_leaf_pte (u64 pa, kvm_pte_t attr, s8 level)
 
static kvm_pte_t kvm_init_invalid_leaf_owner (u8 owner_id)
 
static int kvm_pgtable_visitor_cb (struct kvm_pgtable_walk_data *data, const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
static bool kvm_pgtable_walk_continue (const struct kvm_pgtable_walker *walker, int r)
 
static int __kvm_pgtable_walk (struct kvm_pgtable_walk_data *data, struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pgtable, s8 level)
 
static int __kvm_pgtable_visit (struct kvm_pgtable_walk_data *data, struct kvm_pgtable_mm_ops *mm_ops, kvm_pteref_t pteref, s8 level)
 
static int _kvm_pgtable_walk (struct kvm_pgtable *pgt, struct kvm_pgtable_walk_data *data)
 
int kvm_pgtable_walk (struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_pgtable_walker *walker)
 
static int leaf_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_get_leaf (struct kvm_pgtable *pgt, u64 addr, kvm_pte_t *ptep, s8 *level)
 
static int hyp_set_prot_attr (enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
 
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot (kvm_pte_t pte)
 
static bool hyp_map_walker_try_leaf (const struct kvm_pgtable_visit_ctx *ctx, struct hyp_map_data *data)
 
static int hyp_map_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_hyp_map (struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot)
 
static int hyp_unmap_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
u64 kvm_pgtable_hyp_unmap (struct kvm_pgtable *pgt, u64 addr, u64 size)
 
int kvm_pgtable_hyp_init (struct kvm_pgtable *pgt, u32 va_bits, struct kvm_pgtable_mm_ops *mm_ops)
 
static int hyp_free_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
void kvm_pgtable_hyp_destroy (struct kvm_pgtable *pgt)
 
u64 kvm_get_vtcr (u64 mmfr0, u64 mmfr1, u32 phys_shift)
 
static bool stage2_has_fwb (struct kvm_pgtable *pgt)
 
void kvm_tlb_flush_vmid_range (struct kvm_s2_mmu *mmu, phys_addr_t addr, size_t size)
 
static int stage2_set_prot_attr (struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
 
enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot (kvm_pte_t pte)
 
static bool stage2_pte_needs_update (kvm_pte_t old, kvm_pte_t new)
 
static bool stage2_pte_is_counted (kvm_pte_t pte)
 
static bool stage2_pte_is_locked (kvm_pte_t pte)
 
static bool stage2_try_set_pte (const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
 
static bool stage2_try_break_pte (const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu)
 
static void stage2_make_pte (const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
 
static bool stage2_unmap_defer_tlb_flush (struct kvm_pgtable *pgt)
 
static void stage2_unmap_put_pte (const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops)
 
static bool stage2_pte_cacheable (struct kvm_pgtable *pgt, kvm_pte_t pte)
 
static bool stage2_pte_executable (kvm_pte_t pte)
 
static u64 stage2_map_walker_phys_addr (const struct kvm_pgtable_visit_ctx *ctx, const struct stage2_map_data *data)
 
static bool stage2_leaf_mapping_allowed (const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data)
 
static int stage2_map_walker_try_leaf (const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data)
 
static int stage2_map_walk_table_pre (const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data)
 
static int stage2_map_walk_leaf (const struct kvm_pgtable_visit_ctx *ctx, struct stage2_map_data *data)
 
static int stage2_map_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_stage2_map (struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, enum kvm_pgtable_prot prot, void *mc, enum kvm_pgtable_walk_flags flags)
 
int kvm_pgtable_stage2_set_owner (struct kvm_pgtable *pgt, u64 addr, u64 size, void *mc, u8 owner_id)
 
static int stage2_unmap_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_stage2_unmap (struct kvm_pgtable *pgt, u64 addr, u64 size)
 
static int stage2_attr_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
static int stage2_update_leaf_attrs (struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, s8 *level, enum kvm_pgtable_walk_flags flags)
 
int kvm_pgtable_stage2_wrprotect (struct kvm_pgtable *pgt, u64 addr, u64 size)
 
kvm_pte_t kvm_pgtable_stage2_mkyoung (struct kvm_pgtable *pgt, u64 addr)
 
static int stage2_age_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
bool kvm_pgtable_stage2_test_clear_young (struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold)
 
int kvm_pgtable_stage2_relax_perms (struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot)
 
static int stage2_flush_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_stage2_flush (struct kvm_pgtable *pgt, u64 addr, u64 size)
 
kvm_pte_t * kvm_pgtable_stage2_create_unlinked (struct kvm_pgtable *pgt, u64 phys, s8 level, enum kvm_pgtable_prot prot, void *mc, bool force_pte)
 
static int stage2_block_get_nr_page_tables (s8 level)
 
static int stage2_split_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
int kvm_pgtable_stage2_split (struct kvm_pgtable *pgt, u64 addr, u64 size, struct kvm_mmu_memory_cache *mc)
 
int __kvm_pgtable_stage2_init (struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, struct kvm_pgtable_mm_ops *mm_ops, enum kvm_pgtable_stage2_flags flags, kvm_pgtable_force_pte_cb_t force_pte_cb)
 
size_t kvm_pgtable_stage2_pgd_size (u64 vtcr)
 
static int stage2_free_walker (const struct kvm_pgtable_visit_ctx *ctx, enum kvm_pgtable_walk_flags visit)
 
void kvm_pgtable_stage2_destroy (struct kvm_pgtable *pgt)
 
void kvm_pgtable_stage2_free_unlinked (struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level)
 

Macro Definition Documentation

◆ KVM_INVALID_PTE_LOCKED

#define KVM_INVALID_PTE_LOCKED   BIT(10)

Definition at line 60 of file pgtable.c.

◆ KVM_INVALID_PTE_OWNER_MASK

#define KVM_INVALID_PTE_OWNER_MASK   GENMASK(9, 2)

Definition at line 53 of file pgtable.c.

◆ KVM_MAX_OWNER_ID

#define KVM_MAX_OWNER_ID   1

Definition at line 54 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_HI

#define KVM_PTE_LEAF_ATTR_HI   GENMASK(63, 50)

Definition at line 39 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_HI_S1_GP

#define KVM_PTE_LEAF_ATTR_HI_S1_GP   BIT(50)

Definition at line 47 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_HI_S1_XN

#define KVM_PTE_LEAF_ATTR_HI_S1_XN   BIT(54)

Definition at line 43 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_HI_S2_XN

#define KVM_PTE_LEAF_ATTR_HI_S2_XN   BIT(54)

Definition at line 45 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_HI_SW

#define KVM_PTE_LEAF_ATTR_HI_SW   GENMASK(58, 55)

Definition at line 41 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO

#define KVM_PTE_LEAF_ATTR_LO   GENMASK(11, 2)

Definition at line 20 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_AF

#define KVM_PTE_LEAF_ATTR_LO_S1_AF   BIT(10)

Definition at line 30 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_AP

#define KVM_PTE_LEAF_ATTR_LO_S1_AP   GENMASK(7, 6)

Definition at line 23 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_AP_RO

#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO    ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })

Definition at line 24 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_AP_RW

#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW    ({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })

Definition at line 26 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX   GENMASK(4, 2)

Definition at line 22 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_SH

#define KVM_PTE_LEAF_ATTR_LO_S1_SH   GENMASK(9, 8)

Definition at line 28 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S1_SH_IS

#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS   3

Definition at line 29 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_AF

#define KVM_PTE_LEAF_ATTR_LO_S2_AF   BIT(10)

Definition at line 37 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR   GENMASK(5, 2)

Definition at line 32 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R

#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R   BIT(6)

Definition at line 33 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W

#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W   BIT(7)

Definition at line 34 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_SH

#define KVM_PTE_LEAF_ATTR_LO_S2_SH   GENMASK(9, 8)

Definition at line 35 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_LO_S2_SH_IS

#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS   3

Definition at line 36 of file pgtable.c.

◆ KVM_PTE_LEAF_ATTR_S2_PERMS

#define KVM_PTE_LEAF_ATTR_S2_PERMS
Value:
(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
 KVM_PTE_LEAF_ATTR_HI_S2_XN)

Definition at line 49 of file pgtable.c.

◆ KVM_PTE_TYPE

#define KVM_PTE_TYPE   BIT(1)

Definition at line 15 of file pgtable.c.

◆ KVM_PTE_TYPE_BLOCK

#define KVM_PTE_TYPE_BLOCK   0

Definition at line 16 of file pgtable.c.

◆ KVM_PTE_TYPE_PAGE

#define KVM_PTE_TYPE_PAGE   1

Definition at line 17 of file pgtable.c.

◆ KVM_PTE_TYPE_TABLE

#define KVM_PTE_TYPE_TABLE   1

Definition at line 18 of file pgtable.c.

◆ KVM_S2_MEMATTR

#define KVM_S2_MEMATTR(pgt, attr)    PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

Definition at line 715 of file pgtable.c.
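
Most of the masks above are plain bitfields, meant for the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. A hedged sketch (illustrative only, not code from pgtable.c) of composing stage-1 leaf attributes the same way hyp_set_prot_attr() does, then decoding a field back out:

    /* Normal memory, read/write, Inner Shareable, Access Flag set. */
    kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, MT_NORMAL) |
                     FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, KVM_PTE_LEAF_ATTR_LO_S1_AP_RW) |
                     FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, KVM_PTE_LEAF_ATTR_LO_S1_SH_IS) |
                     KVM_PTE_LEAF_ATTR_LO_S1_AF;

    /* Recover the AP field from an existing PTE. */
    u32 ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, attr);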

Function Documentation

◆ __kvm_pgtable_stage2_init()

int __kvm_pgtable_stage2_init ( struct kvm_pgtable *  pgt,
struct kvm_s2_mmu *  mmu,
struct kvm_pgtable_mm_ops *  mm_ops,
enum kvm_pgtable_stage2_flags  flags,
kvm_pgtable_force_pte_cb_t  force_pte_cb 
)

Definition at line 1533 of file pgtable.c.

1537 {
1538  size_t pgd_sz;
1539  u64 vtcr = mmu->vtcr;
1540  u32 ia_bits = VTCR_EL2_IPA(vtcr);
1541  u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1542  s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1543 
1544  pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1545  pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_pages_exact(pgd_sz);
1546  if (!pgt->pgd)
1547  return -ENOMEM;
1548 
1549  pgt->ia_bits = ia_bits;
1550  pgt->start_level = start_level;
1551  pgt->mm_ops = mm_ops;
1552  pgt->mmu = mmu;
1553  pgt->flags = flags;
1554  pgt->force_pte_cb = force_pte_cb;
1555 
1556  /* Ensure zeroed PGD pages are visible to the hardware walker */
1557  dsb(ishst);
1558  return 0;
1559 }
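
The table geometry used here (ia_bits and start_level) is decoded from the same VTCR_EL2 value that kvm_get_vtcr() constructs, so the software walker and the hardware walker always agree on the input-address size and the starting level.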

◆ __kvm_pgtable_visit()

static int __kvm_pgtable_visit ( struct kvm_pgtable_walk_data data,
struct kvm_pgtable_mm_ops *  mm_ops,
kvm_pteref_t  pteref,
s8  level 
)
inline static

Definition at line 211 of file pgtable.c.

214 {
215  enum kvm_pgtable_walk_flags flags = data->walker->flags;
216  kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
217  struct kvm_pgtable_visit_ctx ctx = {
218  .ptep = ptep,
219  .old = READ_ONCE(*ptep),
220  .arg = data->walker->arg,
221  .mm_ops = mm_ops,
222  .start = data->start,
223  .addr = data->addr,
224  .end = data->end,
225  .level = level,
226  .flags = flags,
227  };
228  int ret = 0;
229  bool reload = false;
230  kvm_pteref_t childp;
231  bool table = kvm_pte_table(ctx.old, level);
232 
233  if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
234  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
235  reload = true;
236  }
237 
238  if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
239  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
240  reload = true;
241  }
242 
243  /*
244  * Reload the page table after invoking the walker callback for leaf
245  * entries or after pre-order traversal, to allow the walker to descend
246  * into a newly installed or replaced table.
247  */
248  if (reload) {
249  ctx.old = READ_ONCE(*ptep);
250  table = kvm_pte_table(ctx.old, level);
251  }
252 
253  if (!kvm_pgtable_walk_continue(data->walker, ret))
254  goto out;
255 
256  if (!table) {
257  data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
258  data->addr += kvm_granule_size(level);
259  goto out;
260  }
261 
262  childp = (kvm_pteref_t)kvm_pte_follow(ctx.old, mm_ops);
263  ret = __kvm_pgtable_walk(data, mm_ops, childp, level + 1);
264  if (!kvm_pgtable_walk_continue(data->walker, ret))
265  goto out;
266 
267  if (ctx.flags & KVM_PGTABLE_WALK_TABLE_POST)
268  ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_POST);
269 
270 out:
271  if (kvm_pgtable_walk_continue(data->walker, ret))
272  return 0;
273 
274  return ret;
275 }

◆ __kvm_pgtable_walk()

static int __kvm_pgtable_walk ( struct kvm_pgtable_walk_data data,
struct kvm_pgtable_mm_ops *  mm_ops,
kvm_pteref_t  pgtable,
s8  level 
)
static

Definition at line 277 of file pgtable.c.

279 {
280  u32 idx;
281  int ret = 0;
282 
283  if (WARN_ON_ONCE(level < KVM_PGTABLE_FIRST_LEVEL ||
284  level > KVM_PGTABLE_LAST_LEVEL))
285  return -EINVAL;
286 
287  for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
288  kvm_pteref_t pteref = &pgtable[idx];
289 
290  if (data->addr >= data->end)
291  break;
292 
293  ret = __kvm_pgtable_visit(data, mm_ops, pteref, level);
294  if (ret)
295  break;
296  }
297 
298  return ret;
299 }

◆ _kvm_pgtable_walk()

static int _kvm_pgtable_walk ( struct kvm_pgtable *  pgt,
struct kvm_pgtable_walk_data data 
)
static

Definition at line 301 of file pgtable.c.

302 {
303  u32 idx;
304  int ret = 0;
305  u64 limit = BIT(pgt->ia_bits);
306 
307  if (data->addr > limit || data->end > limit)
308  return -ERANGE;
309 
310  if (!pgt->pgd)
311  return -EINVAL;
312 
313  for (idx = kvm_pgd_page_idx(pgt, data->addr); data->addr < data->end; ++idx) {
314  kvm_pteref_t pteref = &pgt->pgd[idx * PTRS_PER_PTE];
315 
316  ret = __kvm_pgtable_walk(data, pgt->mm_ops, pteref, pgt->start_level);
317  if (ret)
318  break;
319  }
320 
321  return ret;
322 }

◆ hyp_free_walker()

static int hyp_free_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 591 of file pgtable.c.

593 {
594  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
595 
596  if (!kvm_pte_valid(ctx->old))
597  return 0;
598 
599  mm_ops->put_page(ctx->ptep);
600 
601  if (kvm_pte_table(ctx->old, ctx->level))
602  mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
603 
604  return 0;
605 }

◆ hyp_map_walker()

static int hyp_map_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 465 of file pgtable.c.

467 {
468  kvm_pte_t *childp, new;
469  struct hyp_map_data *data = ctx->arg;
470  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
471 
472  if (hyp_map_walker_try_leaf(ctx, data))
473  return 0;
474 
475  if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
476  return -EINVAL;
477 
478  childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
479  if (!childp)
480  return -ENOMEM;
481 
482  new = kvm_init_table_pte(childp, mm_ops);
483  mm_ops->get_page(ctx->ptep);
484  smp_store_release(ctx->ptep, new);
485 
486  return 0;
487 }

◆ hyp_map_walker_try_leaf()

static bool hyp_map_walker_try_leaf ( const struct kvm_pgtable_visit_ctx *  ctx,
struct hyp_map_data data 
)
static

Definition at line 444 of file pgtable.c.

446 {
447  u64 phys = data->phys + (ctx->addr - ctx->start);
448  kvm_pte_t new;
449 
450  if (!kvm_block_mapping_supported(ctx, phys))
451  return false;
452 
453  new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
454  if (ctx->old == new)
455  return true;
456  if (!kvm_pte_valid(ctx->old))
457  ctx->mm_ops->get_page(ctx->ptep);
458  else if (WARN_ON((ctx->old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
459  return false;
460 
461  smp_store_release(ctx->ptep, new);
462  return true;
463 }

◆ hyp_set_prot_attr()

static int hyp_set_prot_attr ( enum kvm_pgtable_prot  prot,
kvm_pte_t *  ptep 
)
static

Definition at line 389 of file pgtable.c.

390 {
391  bool device = prot & KVM_PGTABLE_PROT_DEVICE;
392  u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
393  kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
394  u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
395  u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
396  KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;
397 
398  if (!(prot & KVM_PGTABLE_PROT_R))
399  return -EINVAL;
400 
401  if (prot & KVM_PGTABLE_PROT_X) {
402  if (prot & KVM_PGTABLE_PROT_W)
403  return -EINVAL;
404 
405  if (device)
406  return -EINVAL;
407 
408  if (system_supports_bti_kernel())
409  attr |= KVM_PTE_LEAF_ATTR_HI_S1_GP;
410  } else {
411  attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
412  }
413 
414  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
415  if (!kvm_lpa2_is_enabled())
416  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
417  attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
418  attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
419  *ptep = attr;
420 
421  return 0;
422 }

◆ hyp_unmap_walker()

static int hyp_unmap_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 512 of file pgtable.c.

514 {
515  kvm_pte_t *childp = NULL;
516  u64 granule = kvm_granule_size(ctx->level);
517  u64 *unmapped = ctx->arg;
518  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
519 
520  if (!kvm_pte_valid(ctx->old))
521  return -EINVAL;
522 
523  if (kvm_pte_table(ctx->old, ctx->level)) {
524  childp = kvm_pte_follow(ctx->old, mm_ops);
525 
526  if (mm_ops->page_count(childp) != 1)
527  return 0;
528 
529  kvm_clear_pte(ctx->ptep);
530  dsb(ishst);
531  __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
532  } else {
533  if (ctx->end - ctx->addr < granule)
534  return -EINVAL;
535 
536  kvm_clear_pte(ctx->ptep);
537  dsb(ishst);
538  __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
539  *unmapped += granule;
540  }
541 
542  dsb(ish);
543  isb();
544  mm_ops->put_page(ctx->ptep);
545 
546  if (childp)
547  mm_ops->put_page(childp);
548 
549  return 0;
550 }

◆ kvm_block_mapping_supported()

static bool kvm_block_mapping_supported ( const struct kvm_pgtable_visit_ctx *  ctx,
u64  phys 
)
static

Definition at line 88 of file pgtable.c.

89 {
90  u64 granule = kvm_granule_size(ctx->level);
91 
92  if (!kvm_level_supports_block_mapping(ctx->level))
93  return false;
94 
95  if (granule > (ctx->end - ctx->addr))
96  return false;
97 
98  if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
99  return false;
100 
101  return IS_ALIGNED(ctx->addr, granule);
102 }

◆ kvm_clear_pte()

static void kvm_clear_pte ( kvm_pte_t *  ptep)
static

Definition at line 146 of file pgtable.c.

147 {
148  WRITE_ONCE(*ptep, 0);
149 }

◆ kvm_get_vtcr()

u64 kvm_get_vtcr ( u64  mmfr0,
u64  mmfr1,
u32  phys_shift 
)

Definition at line 634 of file pgtable.c.

635 {
636  u64 vtcr = VTCR_EL2_FLAGS;
637  s8 lvls;
638 
639  vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
640  vtcr |= VTCR_EL2_T0SZ(phys_shift);
641  /*
642  * Use a minimum 2 level page table to prevent splitting
643  * host PMD huge pages at stage2.
644  */
645  lvls = stage2_pgtable_levels(phys_shift);
646  if (lvls < 2)
647  lvls = 2;
648 
649  /*
650  * When LPA2 is enabled, the HW supports an extra level of translation
651  * (for 5 in total) when using 4K pages. It also introduces VTCR_EL2.SL2
652  * as an addition to SL0 to enable encoding this extra start level.
653  * However, since we always use concatenated pages for the first level
654  * lookup, we will never need this extra level and therefore do not need
655  * to touch SL2.
656  */
657  vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
658 
659 #ifdef CONFIG_ARM64_HW_AFDBM
660  /*
661  * Enable the Hardware Access Flag management, unconditionally
662  * on all CPUs. In systems that have asymmetric support for the feature
663  * this allows KVM to leverage hardware support on the subset of cores
664  * that implement the feature.
665  *
666  * The architecture requires VTCR_EL2.HA to be RES0 (thus ignored by
667  * hardware) on implementations that do not advertise support for the
668  * feature. As such, setting HA unconditionally is safe, unless you
669  * happen to be running on a design that has unadvertised support for
670  * HAFDBS. Here be dragons.
671  */
672  if (!cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
673  vtcr |= VTCR_EL2_HA;
674 #endif /* CONFIG_ARM64_HW_AFDBM */
675 
676  if (kvm_lpa2_is_enabled())
677  vtcr |= VTCR_EL2_DS;
678 
679  /* Set the vmid bits */
680  vtcr |= (get_vmid_bits(mmfr1) == 16) ?
681  VTCR_EL2_VS_16BIT :
682  VTCR_EL2_VS_8BIT;
683 
684  return vtcr;
685 }

◆ kvm_init_invalid_leaf_owner()

static kvm_pte_t kvm_init_invalid_leaf_owner ( u8  owner_id)
static

Definition at line 173 of file pgtable.c.

174 {
175  return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
176 }

◆ kvm_init_table_pte()

static kvm_pte_t kvm_init_table_pte ( kvm_pte_t *  childp,
struct kvm_pgtable_mm_ops *  mm_ops 
)
static

Definition at line 151 of file pgtable.c.

152 {
153  kvm_pte_t pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
154 
155  pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
156  pte |= KVM_PTE_VALID;
157  return pte;
158 }

◆ kvm_init_valid_leaf_pte()

static kvm_pte_t kvm_init_valid_leaf_pte ( u64  pa,
kvm_pte_t  attr,
s8  level 
)
static

Definition at line 160 of file pgtable.c.

161 {
162  kvm_pte_t pte = kvm_phys_to_pte(pa);
163  u64 type = (level == KVM_PGTABLE_LAST_LEVEL) ? KVM_PTE_TYPE_PAGE :
164  KVM_PTE_TYPE_BLOCK;
165 
166  pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
167  pte |= FIELD_PREP(KVM_PTE_TYPE, type);
168  pte |= KVM_PTE_VALID;
169 
170  return pte;
171 }

◆ kvm_pgd_page_idx()

static u32 kvm_pgd_page_idx ( struct kvm_pgtable *  pgt,
u64  addr 
)
static

Definition at line 112 of file pgtable.c.

113 {
114  u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
115  u64 mask = BIT(pgt->ia_bits) - 1;
116 
117  return (addr & mask) >> shift;
118 }

◆ kvm_pgd_pages()

static u32 kvm_pgd_pages ( u32  ia_bits,
s8  start_level 
)
static

Definition at line 120 of file pgtable.c.

121 {
122  struct kvm_pgtable pgt = {
123  .ia_bits = ia_bits,
124  .start_level = start_level,
125  };
126 
127  return kvm_pgd_page_idx(&pgt, -1ULL) + 1;
128 }
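
Worked example, assuming a 4K granule (so kvm_granule_shift(0) == 39): with ia_bits == 40 and start_level == 1, kvm_pgd_page_idx() shifts by the level-0 granule, giving (BIT(40) - 1) >> 39 == 1 for the highest address, so kvm_pgd_pages() returns 2: the PGD is two concatenated pages.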

◆ kvm_pgtable_get_leaf()

int kvm_pgtable_get_leaf ( struct kvm_pgtable *  pgt,
u64  addr,
kvm_pte_t *  ptep,
s8 *  level 
)

Definition at line 361 of file pgtable.c.

363 {
364  struct leaf_walk_data data;
365  struct kvm_pgtable_walker walker = {
366  .cb = leaf_walker,
367  .flags = KVM_PGTABLE_WALK_LEAF,
368  .arg = &data,
369  };
370  int ret;
371 
372  ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
373  PAGE_SIZE, &walker);
374  if (!ret) {
375  if (ptep)
376  *ptep = data.pte;
377  if (level)
378  *level = data.level;
379  }
380 
381  return ret;
382 }
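
A hedged usage sketch, looking up the leaf entry that maps a single IPA ('pgt' and 'ipa' are assumed to come from the caller):

    kvm_pte_t pte;
    s8 level;
    u64 granule;
    int ret;

    ret = kvm_pgtable_get_leaf(pgt, ipa, &pte, &level);
    if (!ret && kvm_pte_valid(pte))
        granule = kvm_granule_size(level); /* size of the mapping backing 'ipa' */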

◆ kvm_pgtable_hyp_destroy()

void kvm_pgtable_hyp_destroy ( struct kvm_pgtable *  pgt)

Definition at line 607 of file pgtable.c.

608 {
609  struct kvm_pgtable_walker walker = {
610  .cb = hyp_free_walker,
611  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
612  };
613 
614  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
615  pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
616  pgt->pgd = NULL;
617 }

◆ kvm_pgtable_hyp_init()

int kvm_pgtable_hyp_init ( struct kvm_pgtable *  pgt,
u32  va_bits,
struct kvm_pgtable_mm_ops *  mm_ops 
)

Definition at line 568 of file pgtable.c.

570 {
571  s8 start_level = KVM_PGTABLE_LAST_LEVEL + 1 -
572  ARM64_HW_PGTABLE_LEVELS(va_bits);
573 
574  if (start_level < KVM_PGTABLE_FIRST_LEVEL ||
575  start_level > KVM_PGTABLE_LAST_LEVEL)
576  return -EINVAL;
577 
578  pgt->pgd = (kvm_pteref_t)mm_ops->zalloc_page(NULL);
579  if (!pgt->pgd)
580  return -ENOMEM;
581 
582  pgt->ia_bits = va_bits;
583  pgt->start_level = start_level;
584  pgt->mm_ops = mm_ops;
585  pgt->mmu = NULL;
586  pgt->force_pte_cb = NULL;
587 
588  return 0;
589 }

◆ kvm_pgtable_hyp_map()

int kvm_pgtable_hyp_map ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
u64  phys,
enum kvm_pgtable_prot  prot 
)

Definition at line 489 of file pgtable.c.

491 {
492  int ret;
493  struct hyp_map_data map_data = {
494  .phys = ALIGN_DOWN(phys, PAGE_SIZE),
495  };
496  struct kvm_pgtable_walker walker = {
497  .cb = hyp_map_walker,
498  .flags = KVM_PGTABLE_WALK_LEAF,
499  .arg = &map_data,
500  };
501 
502  ret = hyp_set_prot_attr(prot, &map_data.attr);
503  if (ret)
504  return ret;
505 
506  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
507  dsb(ishst);
508  isb();
509  return ret;
510 }
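
Taken together with kvm_pgtable_hyp_init(), a minimal (hedged) sequence for building a hypervisor mapping; 'va_bits', 'mm_ops', 'va' and 'phys' are assumed to be provided by the caller:

    struct kvm_pgtable pgt;
    int ret;

    ret = kvm_pgtable_hyp_init(&pgt, va_bits, mm_ops);
    if (ret)
        return ret;

    /* Map one read/write page of normal memory at 'va'. */
    ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, phys,
                              KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W);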

◆ kvm_pgtable_hyp_pte_prot()

enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot ( kvm_pte_t  pte)

Definition at line 424 of file pgtable.c.

425 {
426  enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
427  u32 ap;
428 
429  if (!kvm_pte_valid(pte))
430  return prot;
431 
432  if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
433  prot |= KVM_PGTABLE_PROT_X;
434 
435  ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
436  if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
437  prot |= KVM_PGTABLE_PROT_R;
438  else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
439  prot |= KVM_PGTABLE_PROT_RW;
440 
441  return prot;
442 }

◆ kvm_pgtable_hyp_unmap()

u64 kvm_pgtable_hyp_unmap ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size 
)

Definition at line 552 of file pgtable.c.

553 {
554  u64 unmapped = 0;
555  struct kvm_pgtable_walker walker = {
556  .cb = hyp_unmap_walker,
557  .arg = &unmapped,
558  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
559  };
560 
561  if (!pgt->mm_ops->page_count)
562  return 0;
563 
564  kvm_pgtable_walk(pgt, addr, size, &walker);
565  return unmapped;
566 }

◆ kvm_pgtable_idx()

static u32 kvm_pgtable_idx ( struct kvm_pgtable_walk_data data,
s8  level 
)
static

Definition at line 104 of file pgtable.c.

105 {
106  u64 shift = kvm_granule_shift(level);
107  u64 mask = BIT(PAGE_SHIFT - 3) - 1;
108 
109  return (data->addr >> shift) & mask;
110 }

◆ kvm_pgtable_stage2_create_unlinked()

kvm_pte_t* kvm_pgtable_stage2_create_unlinked ( struct kvm_pgtable *  pgt,
u64  phys,
s8  level,
enum kvm_pgtable_prot  prot,
void *  mc,
bool  force_pte 
)

Definition at line 1378 of file pgtable.c.

1382 {
1383  struct stage2_map_data map_data = {
1384  .phys = phys,
1385  .mmu = pgt->mmu,
1386  .memcache = mc,
1387  .force_pte = force_pte,
1388  };
1389  struct kvm_pgtable_walker walker = {
1390  .cb = stage2_map_walker,
1391  .flags = KVM_PGTABLE_WALK_LEAF |
1392  KVM_PGTABLE_WALK_SKIP_BBM_TLBI |
1393  KVM_PGTABLE_WALK_SKIP_CMO,
1394  .arg = &map_data,
1395  };
1396  /*
1397  * The input address (.addr) is irrelevant for walking an
1398  * unlinked table. Construct an ambiguous IA range to map
1399  * kvm_granule_size(level) worth of memory.
1400  */
1401  struct kvm_pgtable_walk_data data = {
1402  .walker = &walker,
1403  .addr = 0,
1404  .end = kvm_granule_size(level),
1405  };
1406  struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1407  kvm_pte_t *pgtable;
1408  int ret;
1409 
1410  if (!IS_ALIGNED(phys, kvm_granule_size(level)))
1411  return ERR_PTR(-EINVAL);
1412 
1413  ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1414  if (ret)
1415  return ERR_PTR(ret);
1416 
1417  pgtable = mm_ops->zalloc_page(mc);
1418  if (!pgtable)
1419  return ERR_PTR(-ENOMEM);
1420 
1421  ret = __kvm_pgtable_walk(&data, mm_ops, (kvm_pteref_t)pgtable,
1422  level + 1);
1423  if (ret) {
1424  kvm_pgtable_stage2_free_unlinked(mm_ops, pgtable, level);
1425  return ERR_PTR(ret);
1426  }
1427 
1428  return pgtable;
1429 }

◆ kvm_pgtable_stage2_destroy()

void kvm_pgtable_stage2_destroy ( struct kvm_pgtable *  pgt)

Definition at line 1586 of file pgtable.c.

1587 {
1588  size_t pgd_sz;
1589  struct kvm_pgtable_walker walker = {
1590  .cb = stage2_free_walker,
1591  .flags = KVM_PGTABLE_WALK_LEAF |
1592  KVM_PGTABLE_WALK_TABLE_POST,
1593  };
1594 
1595  WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1596  pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
1597  pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1598  pgt->pgd = NULL;
1599 }

◆ kvm_pgtable_stage2_flush()

int kvm_pgtable_stage2_flush ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size 
)

Definition at line 1364 of file pgtable.c.

1365 {
1366  struct kvm_pgtable_walker walker = {
1367  .cb = stage2_flush_walker,
1368  .flags = KVM_PGTABLE_WALK_LEAF,
1369  .arg = pgt,
1370  };
1371 
1372  if (stage2_has_fwb(pgt))
1373  return 0;
1374 
1375  return kvm_pgtable_walk(pgt, addr, size, &walker);
1376 }

◆ kvm_pgtable_stage2_free_unlinked()

void kvm_pgtable_stage2_free_unlinked ( struct kvm_pgtable_mm_ops *  mm_ops,
void *  pgtable,
s8  level 
)

Definition at line 1601 of file pgtable.c.

1602 {
1603  kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
1604  struct kvm_pgtable_walker walker = {
1605  .cb = stage2_free_walker,
1606  .flags = KVM_PGTABLE_WALK_LEAF |
1607  KVM_PGTABLE_WALK_TABLE_POST,
1608  };
1609  struct kvm_pgtable_walk_data data = {
1610  .walker = &walker,
1611 
1612  /*
1613  * At this point the IPA really doesn't matter, as the page
1614  * table being traversed has already been removed from the stage
1615  * 2. Set an appropriate range to cover the entire page table.
1616  */
1617  .addr = 0,
1618  .end = kvm_granule_size(level),
1619  };
1620 
1621  WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
1622 
1623  WARN_ON(mm_ops->page_count(pgtable) != 1);
1624  mm_ops->put_page(pgtable);
1625 }

◆ kvm_pgtable_stage2_map()

int kvm_pgtable_stage2_map ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
u64  phys,
enum kvm_pgtable_prot  prot,
void *  mc,
enum kvm_pgtable_walk_flags  flags 
)

Definition at line 1061 of file pgtable.c.

1064 {
1065  int ret;
1066  struct stage2_map_data map_data = {
1067  .phys = ALIGN_DOWN(phys, PAGE_SIZE),
1068  .mmu = pgt->mmu,
1069  .memcache = mc,
1070  .force_pte = pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
1071  };
1072  struct kvm_pgtable_walker walker = {
1073  .cb = stage2_map_walker,
1074  .flags = flags |
1075  KVM_PGTABLE_WALK_TABLE_PRE |
1076  KVM_PGTABLE_WALK_LEAF,
1077  .arg = &map_data,
1078  };
1079 
1080  if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
1081  return -EINVAL;
1082 
1083  ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
1084  if (ret)
1085  return ret;
1086 
1087  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1088  dsb(ishst);
1089  return ret;
1090 }
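
A hedged sketch of the typical fault-handling use, installing one page in the guest's stage-2 ('memcache' must already hold enough pages for any intermediate tables; all names are illustrative):

    ret = kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE, pfn << PAGE_SHIFT,
                                 KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
                                 memcache, KVM_PGTABLE_WALK_SHARED);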

◆ kvm_pgtable_stage2_mkyoung()

kvm_pte_t kvm_pgtable_stage2_mkyoung ( struct kvm_pgtable *  pgt,
u64  addr 
)

Definition at line 1257 of file pgtable.c.

1258 {
1259  kvm_pte_t pte = 0;
1260  int ret;
1261 
1262  ret = stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
1263  &pte, NULL,
1264  KVM_PGTABLE_WALK_HANDLE_FAULT |
1265  KVM_PGTABLE_WALK_SHARED);
1266  if (!ret)
1267  dsb(ishst);
1268 
1269  return pte;
1270 }

◆ kvm_pgtable_stage2_pgd_size()

size_t kvm_pgtable_stage2_pgd_size ( u64  vtcr)

Definition at line 1561 of file pgtable.c.

1562 {
1563  u32 ia_bits = VTCR_EL2_IPA(vtcr);
1564  u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
1565  s8 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
1566 
1567  return kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
1568 }

◆ kvm_pgtable_stage2_pte_prot()

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot ( kvm_pte_t  pte)

Definition at line 746 of file pgtable.c.

747 {
748  enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
749 
750  if (!kvm_pte_valid(pte))
751  return prot;
752 
753  if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
754  prot |= KVM_PGTABLE_PROT_R;
755  if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
756  prot |= KVM_PGTABLE_PROT_W;
757  if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
758  prot |= KVM_PGTABLE_PROT_X;
759 
760  return prot;
761 }

◆ kvm_pgtable_stage2_relax_perms()

int kvm_pgtable_stage2_relax_perms ( struct kvm_pgtable *  pgt,
u64  addr,
enum kvm_pgtable_prot  prot 
)

Definition at line 1322 of file pgtable.c.

1324 {
1325  int ret;
1326  s8 level;
1327  kvm_pte_t set = 0, clr = 0;
1328 
1329  if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
1330  return -EINVAL;
1331 
1332  if (prot & KVM_PGTABLE_PROT_R)
1333  set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
1334 
1335  if (prot & KVM_PGTABLE_PROT_W)
1336  set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
1337 
1338  if (prot & KVM_PGTABLE_PROT_X)
1339  clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
1340 
1341  ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level,
1342  KVM_PGTABLE_WALK_HANDLE_FAULT |
1343  KVM_PGTABLE_WALK_SHARED);
1344  if (!ret || ret == -EAGAIN)
1345  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level);
1346  return ret;
1347 }

◆ kvm_pgtable_stage2_set_owner()

int kvm_pgtable_stage2_set_owner ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
void *  mc,
u8  owner_id 
)

Definition at line 1092 of file pgtable.c.

1094 {
1095  int ret;
1096  struct stage2_map_data map_data = {
1097  .phys = KVM_PHYS_INVALID,
1098  .mmu = pgt->mmu,
1099  .memcache = mc,
1100  .owner_id = owner_id,
1101  .force_pte = true,
1102  };
1103  struct kvm_pgtable_walker walker = {
1104  .cb = stage2_map_walker,
1105  .flags = KVM_PGTABLE_WALK_TABLE_PRE |
1106  KVM_PGTABLE_WALK_LEAF,
1107  .arg = &map_data,
1108  };
1109 
1110  if (owner_id > KVM_MAX_OWNER_ID)
1111  return -EINVAL;
1112 
1113  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1114  return ret;
1115 }

◆ kvm_pgtable_stage2_split()

int kvm_pgtable_stage2_split ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
struct kvm_mmu_memory_cache *  mc 
)

Definition at line 1521 of file pgtable.c.

1523 {
1524  struct kvm_pgtable_walker walker = {
1525  .cb = stage2_split_walker,
1526  .flags = KVM_PGTABLE_WALK_LEAF,
1527  .arg = mc,
1528  };
1529 
1530  return kvm_pgtable_walk(pgt, addr, size, &walker);
1531 }
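
A typical use is eager page splitting for dirty logging: breaking block mappings down to page granularity up front so that later write-protection faults only ever affect a single page. The caller pre-fills 'mc' with enough pages for the replacement tables (see stage2_block_get_nr_page_tables()).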

◆ kvm_pgtable_stage2_test_clear_young()

bool kvm_pgtable_stage2_test_clear_young ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
bool  mkold 
)

Definition at line 1306 of file pgtable.c.

1308 {
1309  struct stage2_age_data data = {
1310  .mkold = mkold,
1311  };
1312  struct kvm_pgtable_walker walker = {
1313  .cb = stage2_age_walker,
1314  .arg = &data,
1315  .flags = KVM_PGTABLE_WALK_LEAF,
1316  };
1317 
1318  WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1319  return data.young;
1320 }

◆ kvm_pgtable_stage2_unmap()

int kvm_pgtable_stage2_unmap ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size 
)

Definition at line 1160 of file pgtable.c.

1161 {
1162  int ret;
1163  struct kvm_pgtable_walker walker = {
1164  .cb = stage2_unmap_walker,
1165  .arg = pgt,
1166  .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
1167  };
1168 
1169  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1170  if (stage2_unmap_defer_tlb_flush(pgt))
1171  /* Perform the deferred TLB invalidations */
1172  kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
1173 
1174  return ret;
1175 }

◆ kvm_pgtable_stage2_wrprotect()

int kvm_pgtable_stage2_wrprotect ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size 
)

Definition at line 1250 of file pgtable.c.

1251 {
1252  return stage2_update_leaf_attrs(pgt, addr, size, 0,
1254  NULL, NULL, 0);
1255 }
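
Write protection is implemented as a plain attribute update: stage2_update_leaf_attrs() clears KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W on every valid leaf in the range, so subsequent guest writes fault and can be tracked (for dirty logging, for example).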

◆ kvm_pgtable_visitor_cb()

static int kvm_pgtable_visitor_cb ( struct kvm_pgtable_walk_data data,
const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 178 of file pgtable.c.

181 {
182  struct kvm_pgtable_walker *walker = data->walker;
183 
184  /* Ensure the appropriate lock is held (e.g. RCU lock for stage-2 MMU) */
185  WARN_ON_ONCE(kvm_pgtable_walk_shared(ctx) && !kvm_pgtable_walk_lock_held());
186  return walker->cb(ctx, visit);
187 }

◆ kvm_pgtable_walk()

int kvm_pgtable_walk ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
struct kvm_pgtable_walker *  walker 
)

Definition at line 324 of file pgtable.c.

326 {
327  struct kvm_pgtable_walk_data walk_data = {
328  .start = ALIGN_DOWN(addr, PAGE_SIZE),
329  .addr = ALIGN_DOWN(addr, PAGE_SIZE),
330  .end = PAGE_ALIGN(walk_data.addr + size),
331  .walker = walker,
332  };
333  int r;
334 
335  r = kvm_pgtable_walk_begin(walker);
336  if (r)
337  return r;
338 
339  r = _kvm_pgtable_walk(pgt, &walk_data);
340  kvm_pgtable_walk_end(walker);
341 
342  return r;
343 }
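
kvm_pgtable_walk() is the entry point used by every map/unmap/attribute operation in this file, but callers may also supply their own visitor. A hedged sketch of a custom walker that counts valid leaf entries in a range (the callback and counter are hypothetical, not part of this API):

    static int count_leaves_walker(const struct kvm_pgtable_visit_ctx *ctx,
                                   enum kvm_pgtable_walk_flags visit)
    {
        u64 *count = ctx->arg;

        if (kvm_pte_valid(ctx->old))
            (*count)++;

        return 0;
    }

    u64 count = 0;
    struct kvm_pgtable_walker walker = {
        .cb    = count_leaves_walker,
        .flags = KVM_PGTABLE_WALK_LEAF,
        .arg   = &count,
    };

    kvm_pgtable_walk(pgt, addr, size, &walker);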

◆ kvm_pgtable_walk_continue()

static bool kvm_pgtable_walk_continue ( const struct kvm_pgtable_walker *  walker,
int  r 
)
static

Definition at line 189 of file pgtable.c.

191 {
192  /*
193  * Visitor callbacks return EAGAIN when the conditions that led to a
194  * fault are no longer reflected in the page tables due to a race to
195  * update a PTE. In the context of a fault handler this is interpreted
196  * as a signal to retry guest execution.
197  *
198  * Ignore the return code altogether for walkers outside a fault handler
199  * (e.g. write protecting a range of memory) and chug along with the
200  * page table walk.
201  */
202  if (r == -EAGAIN)
203  return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
204 
205  return !r;
206 }

◆ kvm_pgtable_walk_skip_bbm_tlbi()

static bool kvm_pgtable_walk_skip_bbm_tlbi ( const struct kvm_pgtable_visit_ctx *  ctx)
static

Definition at line 70 of file pgtable.c.

71 {
72  return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_BBM_TLBI);
73 }

◆ kvm_pgtable_walk_skip_cmo()

static bool kvm_pgtable_walk_skip_cmo ( const struct kvm_pgtable_visit_ctx *  ctx)
static

Definition at line 75 of file pgtable.c.

76 {
77  return unlikely(ctx->flags & KVM_PGTABLE_WALK_SKIP_CMO);
78 }

◆ kvm_phys_is_valid()

static bool kvm_phys_is_valid ( u64  phys)
static

Definition at line 80 of file pgtable.c.

81 {
82  u64 parange_max = kvm_get_parange_max();
83  u8 shift = id_aa64mmfr0_parange_to_phys_shift(parange_max);
84 
85  return phys < BIT(shift);
86 }

◆ kvm_pte_follow()

static kvm_pte_t* kvm_pte_follow ( kvm_pte_t  pte,
struct kvm_pgtable_mm_ops *  mm_ops 
)
static

Definition at line 141 of file pgtable.c.

142 {
143  return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
144 }

◆ kvm_pte_table()

static bool kvm_pte_table ( kvm_pte_t  pte,
s8  level 
)
static

Definition at line 130 of file pgtable.c.

131 {
132  if (level == KVM_PGTABLE_LAST_LEVEL)
133  return false;
134 
135  if (!kvm_pte_valid(pte))
136  return false;
137 
138  return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
139 }

◆ kvm_tlb_flush_vmid_range()

void kvm_tlb_flush_vmid_range ( struct kvm_s2_mmu *  mmu,
phys_addr_t  addr,
size_t  size 
)

Definition at line 695 of file pgtable.c.

697 {
698  unsigned long pages, inval_pages;
699 
700  if (!system_supports_tlb_range()) {
701  kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
702  return;
703  }
704 
705  pages = size >> PAGE_SHIFT;
706  while (pages > 0) {
707  inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
708  kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
709 
710  addr += inval_pages << PAGE_SHIFT;
711  pages -= inval_pages;
712  }
713 }
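
On systems with TLB range invalidation support the flush is issued in batches of at most MAX_TLBI_RANGE_PAGES; without it, the code falls back to invalidating all entries for the VMID.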

◆ leaf_walker()

static int leaf_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 350 of file pgtable.c.

352 {
353  struct leaf_walk_data *data = ctx->arg;
354 
355  data->pte = ctx->old;
356  data->level = ctx->level;
357 
358  return 0;
359 }

◆ stage2_age_walker()

static int stage2_age_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1277 of file pgtable.c.

1279 {
1280  kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
1281  struct stage2_age_data *data = ctx->arg;
1282 
1283  if (!kvm_pte_valid(ctx->old) || new == ctx->old)
1284  return 0;
1285 
1286  data->young = true;
1287 
1288  /*
1289  * stage2_age_walker() is always called while holding the MMU lock for
1290  * write, so this will always succeed. Nonetheless, this deliberately
1291  * follows the race detection pattern of the other stage-2 walkers in
1292  * case the locking mechanics of the MMU notifiers is ever changed.
1293  */
1294  if (data->mkold && !stage2_try_set_pte(ctx, new))
1295  return -EAGAIN;
1296 
1297  /*
1298  * "But where's the TLBI?!", you scream.
1299  * "Over in the core code", I sigh.
1300  *
1301  * See the '->clear_flush_young()' callback on the KVM mmu notifier.
1302  */
1303  return 0;
1304 }

◆ stage2_attr_walker()

static int stage2_attr_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1184 of file pgtable.c.

1186 {
1187  kvm_pte_t pte = ctx->old;
1188  struct stage2_attr_data *data = ctx->arg;
1189  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1190 
1191  if (!kvm_pte_valid(ctx->old))
1192  return -EAGAIN;
1193 
1194  data->level = ctx->level;
1195  data->pte = pte;
1196  pte &= ~data->attr_clr;
1197  pte |= data->attr_set;
1198 
1199  /*
1200  * We may race with the CPU trying to set the access flag here,
1201  * but worst-case the access flag update gets lost and will be
1202  * set on the next access instead.
1203  */
1204  if (data->pte != pte) {
1205  /*
1206  * Invalidate instruction cache before updating the guest
1207  * stage-2 PTE if we are going to add executable permission.
1208  */
1209  if (mm_ops->icache_inval_pou &&
1210  stage2_pte_executable(pte) && !stage2_pte_executable(ctx->old))
1211  mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
1212  kvm_granule_size(ctx->level));
1213 
1214  if (!stage2_try_set_pte(ctx, pte))
1215  return -EAGAIN;
1216  }
1217 
1218  return 0;
1219 }

◆ stage2_block_get_nr_page_tables()

static int stage2_block_get_nr_page_tables ( s8  level)
static

Definition at line 1436 of file pgtable.c.

1437 {
1438  switch (level) {
1439  case 1:
1440  return PTRS_PER_PTE + 1;
1441  case 2:
1442  return 1;
1443  case 3:
1444  return 0;
1445  default:
1446  WARN_ON_ONCE(level < KVM_PGTABLE_MIN_BLOCK_LEVEL ||
1447  level > KVM_PGTABLE_LAST_LEVEL);
1448  return -EINVAL;
1449  };
1450 }
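
With a 4K granule this works out as follows: splitting a level-1 block (1GiB) down to pages needs one level-2 table plus PTRS_PER_PTE (512) level-3 tables, hence PTRS_PER_PTE + 1; a level-2 block (2MiB) needs a single level-3 table; and a level-3 entry is already page-sized, so no tables are required.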

◆ stage2_flush_walker()

static int stage2_flush_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1349 of file pgtable.c.

1351 {
1352  struct kvm_pgtable *pgt = ctx->arg;
1353  struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
1354 
1355  if (!kvm_pte_valid(ctx->old) || !stage2_pte_cacheable(pgt, ctx->old))
1356  return 0;
1357 
1358  if (mm_ops->dcache_clean_inval_poc)
1359  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1360  kvm_granule_size(ctx->level));
1361  return 0;
1362 }

◆ stage2_free_walker()

static int stage2_free_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1570 of file pgtable.c.

1572 {
1573  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1574 
1575  if (!stage2_pte_is_counted(ctx->old))
1576  return 0;
1577 
1578  mm_ops->put_page(ctx->ptep);
1579 
1580  if (kvm_pte_table(ctx->old, ctx->level))
1581  mm_ops->put_page(kvm_pte_follow(ctx->old, mm_ops));
1582 
1583  return 0;
1584 }

◆ stage2_has_fwb()

static bool stage2_has_fwb ( struct kvm_pgtable *  pgt)
static

Definition at line 687 of file pgtable.c.

688 {
689  if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
690  return false;
691 
692  return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
693 }

◆ stage2_leaf_mapping_allowed()

static bool stage2_leaf_mapping_allowed ( const struct kvm_pgtable_visit_ctx *  ctx,
struct stage2_map_data data 
)
static

Definition at line 927 of file pgtable.c.

929 {
930  u64 phys = stage2_map_walker_phys_addr(ctx, data);
931 
932  if (data->force_pte && ctx->level < KVM_PGTABLE_LAST_LEVEL)
933  return false;
934 
935  return kvm_block_mapping_supported(ctx, phys);
936 }

◆ stage2_make_pte()

static void stage2_make_pte ( const struct kvm_pgtable_visit_ctx *  ctx,
kvm_pte_t  new 
)
static

Definition at line 849 of file pgtable.c.

850 {
851  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
852 
853  WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
854 
855  if (stage2_pte_is_counted(new))
856  mm_ops->get_page(ctx->ptep);
857 
858  smp_store_release(ctx->ptep, new);
859 }

◆ stage2_map_walk_leaf()

static int stage2_map_walk_leaf ( const struct kvm_pgtable_visit_ctx *  ctx,
struct stage2_map_data *  data 
)
static

Definition at line 1000 of file pgtable.c.

1002 {
1003  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1004  kvm_pte_t *childp, new;
1005  int ret;
1006 
1007  ret = stage2_map_walker_try_leaf(ctx, data);
1008  if (ret != -E2BIG)
1009  return ret;
1010 
1011  if (WARN_ON(ctx->level == KVM_PGTABLE_LAST_LEVEL))
1012  return -EINVAL;
1013 
1014  if (!data->memcache)
1015  return -ENOMEM;
1016 
1017  childp = mm_ops->zalloc_page(data->memcache);
1018  if (!childp)
1019  return -ENOMEM;
1020 
1021  if (!stage2_try_break_pte(ctx, data->mmu)) {
1022  mm_ops->put_page(childp);
1023  return -EAGAIN;
1024  }
1025 
1026  /*
1027  * If we've run into an existing block mapping then replace it with
1028  * a table. Accesses beyond 'end' that fall within the new table
1029  * will be mapped lazily.
1030  */
1031  new = kvm_init_table_pte(childp, mm_ops);
1032  stage2_make_pte(ctx, new);
1033 
1034  return 0;
1035 }

◆ stage2_map_walk_table_pre()

static int stage2_map_walk_table_pre ( const struct kvm_pgtable_visit_ctx *  ctx,
struct stage2_map_data *  data 
)
static

Definition at line 982 of file pgtable.c.

984 {
985  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
986  kvm_pte_t *childp = kvm_pte_follow(ctx->old, mm_ops);
987  int ret;
988 
989  if (!stage2_leaf_mapping_allowed(ctx, data))
990  return 0;
991 
992  ret = stage2_map_walker_try_leaf(ctx, data);
993  if (ret)
994  return ret;
995 
996  mm_ops->free_unlinked_table(childp, ctx->level);
997  return 0;
998 }

◆ stage2_map_walker()

static int stage2_map_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1046 of file pgtable.c.

1048 {
1049  struct stage2_map_data *data = ctx->arg;
1050 
1051  switch (visit) {
1052  case KVM_PGTABLE_WALK_TABLE_PRE:
1053  return stage2_map_walk_table_pre(ctx, data);
1054  case KVM_PGTABLE_WALK_LEAF:
1055  return stage2_map_walk_leaf(ctx, data);
1056  default:
1057  return -EINVAL;
1058  }
1059 }
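The dispatcher is reached through a walker that requests both TABLE_PRE and LEAF visits. A condensed, hedged sketch of that wiring, modelled on kvm_pgtable_stage2_map(); the locals (phys, mc, force_pte, prot, addr, size, flags) are assumptions standing in for the real parameters:

    struct stage2_map_data map_data = {
            .phys           = ALIGN_DOWN(phys, PAGE_SIZE),
            .mmu            = pgt->mmu,
            .memcache       = mc,
            .force_pte      = force_pte,
    };
    struct kvm_pgtable_walker walker = {
            .cb             = stage2_map_walker,
            .flags          = flags |
                              KVM_PGTABLE_WALK_TABLE_PRE |
                              KVM_PGTABLE_WALK_LEAF,
            .arg            = &map_data,
    };
    int ret;

    /* Encode the requested protection into map_data.attr up front. */
    ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
    if (ret)
            return ret;

    ret = kvm_pgtable_walk(pgt, addr, size, &walker);
    /* Ensure the new tables are visible before any later TLB fill. */
    dsb(ishst);
    return ret;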

◆ stage2_map_walker_phys_addr()

static u64 stage2_map_walker_phys_addr ( const struct kvm_pgtable_visit_ctx *  ctx,
const struct stage2_map_data *  data 
)
static

Definition at line 907 of file pgtable.c.

909 {
910  u64 phys = data->phys;
911 
912  /*
913  * Stage-2 walks to update ownership data are communicated to the map
914  * walker using an invalid PA. Avoid offsetting an already invalid PA,
915  * which could overflow and make the address valid again.
916  */
917  if (!kvm_phys_is_valid(phys))
918  return phys;
919 
920  /*
921  * Otherwise, work out the correct PA based on how far the walk has
922  * gotten.
923  */
924  return phys + (ctx->addr - ctx->start);
925 }
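A worked example with illustrative values: if a map walk began at ctx->start = 0x80000000 (IPA) with data->phys = 0x40000000 (PA), then when the walker reaches ctx->addr = 0x80200000 the leaf it installs resolves to:

    phys = 0x40000000 + (0x80200000 - 0x80000000) = 0x40200000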

◆ stage2_map_walker_try_leaf()

static int stage2_map_walker_try_leaf ( const struct kvm_pgtable_visit_ctx *  ctx,
struct stage2_map_data *  data 
)
static

Definition at line 938 of file pgtable.c.

940 {
941  kvm_pte_t new;
942  u64 phys = stage2_map_walker_phys_addr(ctx, data);
943  u64 granule = kvm_granule_size(ctx->level);
944  struct kvm_pgtable *pgt = data->mmu->pgt;
945  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
946 
947  if (!stage2_leaf_mapping_allowed(ctx, data))
948  return -E2BIG;
949 
950  if (kvm_phys_is_valid(phys))
951  new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
 952  else
 953  new = kvm_init_invalid_leaf_owner(data->owner_id);
 954 
955  /*
956  * Skip updating the PTE if we are trying to recreate the exact
957  * same mapping or only change the access permissions. Instead,
958  * the vCPU will exit one more time from guest if still needed
959  * and then go through the path of relaxing permissions.
960  */
961  if (!stage2_pte_needs_update(ctx->old, new))
962  return -EAGAIN;
963 
964  if (!stage2_try_break_pte(ctx, data->mmu))
965  return -EAGAIN;
966 
967  /* Perform CMOs before installation of the guest stage-2 PTE */
968  if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->dcache_clean_inval_poc &&
969  stage2_pte_cacheable(pgt, new))
970  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
971  granule);
972 
 973  if (!kvm_pgtable_walk_skip_cmo(ctx) && mm_ops->icache_inval_pou &&
 974  stage2_pte_executable(new))
 975  mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
976 
977  stage2_make_pte(ctx, new);
978 
979  return 0;
980 }

◆ stage2_pte_cacheable()

static bool stage2_pte_cacheable ( struct kvm_pgtable *  pgt,
kvm_pte_t  pte 
)
static

Definition at line 896 of file pgtable.c.

897 {
898  u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
899  return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
900 }

◆ stage2_pte_executable()

static bool stage2_pte_executable ( kvm_pte_t  pte)
static

Definition at line 902 of file pgtable.c.

903 {
904  return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
905 }

◆ stage2_pte_is_counted()

static bool stage2_pte_is_counted ( kvm_pte_t  pte)
static

Definition at line 771 of file pgtable.c.

772 {
773  /*
774  * The refcount tracks valid entries as well as invalid entries if they
775  * encode ownership of a page to another entity than the page-table
776  * owner, whose id is 0.
777  */
778  return !!pte;
779 }

◆ stage2_pte_is_locked()

static bool stage2_pte_is_locked ( kvm_pte_t  pte)
static

Definition at line 781 of file pgtable.c.

782 {
783  return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
784 }

◆ stage2_pte_needs_update()

static bool stage2_pte_needs_update ( kvm_pte_t  old,
kvm_pte_t  new 
)
static

Definition at line 763 of file pgtable.c.

764 {
765  if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
766  return true;
767 
768  return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
769 }
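In other words, a delta confined to the access-permission bits never forces a remap. A hedged illustration, assuming ro_pte is a valid read-only stage-2 leaf PTE:

    kvm_pte_t old = ro_pte;
    kvm_pte_t new = ro_pte | KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

    /*
     * Only the S2AP write bit differs, and KVM_PTE_LEAF_ATTR_S2_PERMS
     * masks it out, so no update (and no break-before-make) is needed;
     * the change is left to the permission-relaxation fault path.
     */
    WARN_ON(stage2_pte_needs_update(old, new));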

◆ stage2_set_prot_attr()

static int stage2_set_prot_attr ( struct kvm_pgtable *  pgt,
enum kvm_pgtable_prot  prot,
kvm_pte_t *  ptep 
)
static

Definition at line 717 of file pgtable.c.

 719 {
 720  bool device = prot & KVM_PGTABLE_PROT_DEVICE;
 721  kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
 722  KVM_S2_MEMATTR(pgt, NORMAL);
 723  u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
 724 
 725  if (!(prot & KVM_PGTABLE_PROT_X))
 726  attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
 727  else if (device)
 728  return -EINVAL;
 729 
 730  if (prot & KVM_PGTABLE_PROT_R)
 731  attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
 732 
 733  if (prot & KVM_PGTABLE_PROT_W)
 734  attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
 735 
 736  if (!kvm_lpa2_is_enabled())
 737  attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
 738 
 739  attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
 740  attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
 741  *ptep = attr;
 742 
 743  return 0;
 744 }
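A minimal usage sketch (hedged: pgt, phys and level are assumed locals, and KVM_PGTABLE_PROT_RWX comes from kvm_pgtable.h):

    kvm_pte_t attr, new;
    int ret;

    ret = stage2_set_prot_attr(pgt, KVM_PGTABLE_PROT_RWX, &attr);
    if (ret)
            return ret;     /* e.g. -EINVAL: executable device memory */

    new = kvm_init_valid_leaf_pte(phys, attr, level);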

◆ stage2_split_walker()

static int stage2_split_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1452 of file pgtable.c.

1454 {
1455  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1456  struct kvm_mmu_memory_cache *mc = ctx->arg;
1457  struct kvm_s2_mmu *mmu;
1458  kvm_pte_t pte = ctx->old, new, *childp;
1459  enum kvm_pgtable_prot prot;
1460  s8 level = ctx->level;
1461  bool force_pte;
1462  int nr_pages;
1463  u64 phys;
1464 
1465  /* No huge-pages exist at the last level */
1466  if (level == KVM_PGTABLE_LAST_LEVEL)
1467  return 0;
1468 
1469  /* We only split valid block mappings */
1470  if (!kvm_pte_valid(pte))
1471  return 0;
1472 
1473  nr_pages = stage2_block_get_nr_page_tables(level);
1474  if (nr_pages < 0)
1475  return nr_pages;
1476 
1477  if (mc->nobjs >= nr_pages) {
1478  /* Build a tree mapped down to the PTE granularity. */
1479  force_pte = true;
1480  } else {
1481  /*
1482  * Don't force PTEs, so create_unlinked() below does
1483  * not populate the tree up to the PTE level. The
1484  * consequence is that the call will require a single
1485  * page of level 2 entries at level 1, or a single
1486  * page of PTEs at level 2. If we are at level 1, the
1487  * PTEs will be created recursively.
1488  */
1489  force_pte = false;
1490  nr_pages = 1;
1491  }
1492 
1493  if (mc->nobjs < nr_pages)
1494  return -ENOMEM;
1495 
1496  mmu = container_of(mc, struct kvm_s2_mmu, split_page_cache);
1497  phys = kvm_pte_to_phys(pte);
1498  prot = kvm_pgtable_stage2_pte_prot(pte);
1499 
1500  childp = kvm_pgtable_stage2_create_unlinked(mmu->pgt, phys,
1501  level, prot, mc, force_pte);
1502  if (IS_ERR(childp))
1503  return PTR_ERR(childp);
1504 
1505  if (!stage2_try_break_pte(ctx, mmu)) {
1506  kvm_pgtable_stage2_free_unlinked(mm_ops, childp, level);
1507  return -EAGAIN;
1508  }
1509 
1510  /*
1511  * Note, the contents of the page table are guaranteed to be made
1512  * visible before the new PTE is assigned because stage2_make_pte()
1513  * writes the PTE using smp_store_release().
1514  */
1515  new = kvm_init_table_pte(childp, mm_ops);
1516  stage2_make_pte(ctx, new);
1517  dsb(ishst);
1518  return 0;
1519 }
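A hedged sketch of the entry point that drives this walker, modelled on kvm_pgtable_stage2_split():

    int kvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
                                 struct kvm_mmu_memory_cache *mc)
    {
            struct kvm_pgtable_walker walker = {
                    .cb     = stage2_split_walker,
                    .flags  = KVM_PGTABLE_WALK_LEAF,
                    .arg    = mc,
            };

            return kvm_pgtable_walk(pgt, addr, size, &walker);
    }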

◆ stage2_try_break_pte()

static bool stage2_try_break_pte ( const struct kvm_pgtable_visit_ctx *  ctx,
struct kvm_s2_mmu *  mmu 
)
static

stage2_try_break_pte() - Invalidates a pte according to the 'break-before-make' requirements of the architecture.

@ctx: context of the visited pte.
@mmu: stage-2 mmu

Returns: true if the pte was successfully broken.

If the removed pte was valid, performs the necessary serialization and TLB invalidation for the old value. For counted ptes, drops the reference count on the containing table page.

Definition at line 810 of file pgtable.c.

812 {
813  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
814 
815  if (stage2_pte_is_locked(ctx->old)) {
816  /*
817  * Should never occur if this walker has exclusive access to the
818  * page tables.
819  */
820  WARN_ON(!kvm_pgtable_walk_shared(ctx));
821  return false;
822  }
823 
 824  if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
 825  return false;
826 
827  if (!kvm_pgtable_walk_skip_bbm_tlbi(ctx)) {
828  /*
829  * Perform the appropriate TLB invalidation based on the
830  * evicted pte value (if any).
831  */
832  if (kvm_pte_table(ctx->old, ctx->level)) {
833  u64 size = kvm_granule_size(ctx->level);
834  u64 addr = ALIGN_DOWN(ctx->addr, size);
835 
836  kvm_tlb_flush_vmid_range(mmu, addr, size);
837  } else if (kvm_pte_valid(ctx->old)) {
838  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
839  ctx->addr, ctx->level);
840  }
841  }
842 
843  if (stage2_pte_is_counted(ctx->old))
844  mm_ops->put_page(ctx->ptep);
845 
846  return true;
847 }
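Every caller pairs a successful break with stage2_make_pte(), which publishes the replacement via smp_store_release(). The pattern, condensed from stage2_map_walk_leaf() above:

    if (!stage2_try_break_pte(ctx, mmu))
            return -EAGAIN;         /* lost the race with a shared walker */

    /*
     * The old entry now reads KVM_INVALID_PTE_LOCKED and its TLB entries
     * have been invalidated, so the replacement can be installed safely.
     */
    stage2_make_pte(ctx, new);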

◆ stage2_try_set_pte()

static bool stage2_try_set_pte ( const struct kvm_pgtable_visit_ctx *  ctx,
kvm_pte_t  new 
)
static

Definition at line 786 of file pgtable.c.

787 {
788  if (!kvm_pgtable_walk_shared(ctx)) {
789  WRITE_ONCE(*ctx->ptep, new);
790  return true;
791  }
792 
793  return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
794 }

◆ stage2_unmap_defer_tlb_flush()

static bool stage2_unmap_defer_tlb_flush ( struct kvm_pgtable *  pgt)
static

Definition at line 861 of file pgtable.c.

862 {
863  /*
864  * If FEAT_TLBIRANGE is implemented, defer the individual
865  * TLB invalidations until the entire walk is finished, and
866  * then use the range-based TLBI instructions to do the
867  * invalidations. Condition deferred TLB invalidation on the
868  * system supporting FWB as the optimization is entirely
869  * pointless when the unmap walker needs to perform CMOs.
870  */
871  return system_supports_tlb_range() && stage2_has_fwb(pgt);
872 }
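When this returns true, the unmap walker leaves out the per-PTE invalidations and the caller issues a single ranged flush once the walk completes. A hedged sketch of that tail, modelled on kvm_pgtable_stage2_unmap():

    ret = kvm_pgtable_walk(pgt, addr, size, &walker);

    if (stage2_unmap_defer_tlb_flush(pgt))
            /* One ranged TLBI covers everything the walk tore down. */
            kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);

    return ret;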

◆ stage2_unmap_put_pte()

static void stage2_unmap_put_pte ( const struct kvm_pgtable_visit_ctx *  ctx,
struct kvm_s2_mmu *  mmu,
struct kvm_pgtable_mm_ops *  mm_ops 
)
static

Definition at line 874 of file pgtable.c.

877 {
878  struct kvm_pgtable *pgt = ctx->arg;
879 
880  /*
881  * Clear the existing PTE, and perform break-before-make if it was
882  * valid. Depending on the system support, defer the TLB maintenance
883  * for the same until the entire unmap walk is completed.
884  */
885  if (kvm_pte_valid(ctx->old)) {
886  kvm_clear_pte(ctx->ptep);
887 
 888  if (!stage2_unmap_defer_tlb_flush(pgt))
 889  kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
890  ctx->addr, ctx->level);
891  }
892 
893  mm_ops->put_page(ctx->ptep);
894 }

◆ stage2_unmap_walker()

static int stage2_unmap_walker ( const struct kvm_pgtable_visit_ctx *  ctx,
enum kvm_pgtable_walk_flags  visit 
)
static

Definition at line 1117 of file pgtable.c.

1119 {
1120  struct kvm_pgtable *pgt = ctx->arg;
1121  struct kvm_s2_mmu *mmu = pgt->mmu;
1122  struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
1123  kvm_pte_t *childp = NULL;
1124  bool need_flush = false;
1125 
1126  if (!kvm_pte_valid(ctx->old)) {
1127  if (stage2_pte_is_counted(ctx->old)) {
1128  kvm_clear_pte(ctx->ptep);
1129  mm_ops->put_page(ctx->ptep);
1130  }
1131  return 0;
1132  }
1133 
1134  if (kvm_pte_table(ctx->old, ctx->level)) {
1135  childp = kvm_pte_follow(ctx->old, mm_ops);
1136 
1137  if (mm_ops->page_count(childp) != 1)
1138  return 0;
1139  } else if (stage2_pte_cacheable(pgt, ctx->old)) {
1140  need_flush = !stage2_has_fwb(pgt);
1141  }
1142 
1143  /*
1144  * This is similar to the map() path in that we unmap the entire
1145  * block entry and rely on the remaining portions being faulted
1146  * back lazily.
1147  */
1148  stage2_unmap_put_pte(ctx, mmu, mm_ops);
1149 
1150  if (need_flush && mm_ops->dcache_clean_inval_poc)
1151  mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
1152  kvm_granule_size(ctx->level));
1153 
1154  if (childp)
1155  mm_ops->put_page(childp);
1156 
1157  return 0;
1158 }

◆ stage2_update_leaf_attrs()

static int stage2_update_leaf_attrs ( struct kvm_pgtable *  pgt,
u64  addr,
u64  size,
kvm_pte_t  attr_set,
kvm_pte_t  attr_clr,
kvm_pte_t *  orig_pte,
s8 *  level,
enum kvm_pgtable_walk_flags  flags 
)
static

Definition at line 1221 of file pgtable.c.

1225 {
1226  int ret;
1227  kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
1228  struct stage2_attr_data data = {
1229  .attr_set = attr_set & attr_mask,
1230  .attr_clr = attr_clr & attr_mask,
1231  };
1232  struct kvm_pgtable_walker walker = {
1233  .cb = stage2_attr_walker,
1234  .arg = &data,
1235  .flags = flags | KVM_PGTABLE_WALK_LEAF,
1236  };
1237 
1238  ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1239  if (ret)
1240  return ret;
1241 
1242  if (orig_pte)
1243  *orig_pte = data.pte;
1244 
1245  if (level)
1246  *level = data.level;
1247  return 0;
1248 }
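The exported attribute helpers are thin wrappers around this function. Write-protection for dirty logging, for instance, clears only the S2AP write bit; this sketch mirrors kvm_pgtable_stage2_wrprotect():

    int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
    {
            return stage2_update_leaf_attrs(pgt, addr, size, 0,
                                            KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
                                            NULL, NULL, 0);
    }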