KVM
mem_protect.h File Reference
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

Classes

struct  host_mmu
 

Macros

#define PKVM_PAGE_STATE_PROT_MASK   (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
 

Enumerations

enum  pkvm_page_state {
  PKVM_PAGE_OWNED = 0ULL, PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0, PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1, __PKVM_PAGE_RESERVED,
  PKVM_NOPAGE
}
 
enum  pkvm_component_id { PKVM_ID_HOST, PKVM_ID_HYP, PKVM_ID_FFA }
 

Functions

static enum kvm_pgtable_prot pkvm_mkstate (enum kvm_pgtable_prot prot, enum pkvm_page_state state)
 
static enum pkvm_page_state pkvm_getstate (enum kvm_pgtable_prot prot)
 
int __pkvm_prot_finalize (void)
 
int __pkvm_host_share_hyp (u64 pfn)
 
int __pkvm_host_unshare_hyp (u64 pfn)
 
int __pkvm_host_donate_hyp (u64 pfn, u64 nr_pages)
 
int __pkvm_hyp_donate_host (u64 pfn, u64 nr_pages)
 
int __pkvm_host_share_ffa (u64 pfn, u64 nr_pages)
 
int __pkvm_host_unshare_ffa (u64 pfn, u64 nr_pages)
 
bool addr_is_memory (phys_addr_t phys)
 
int host_stage2_idmap_locked (phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot)
 
int host_stage2_set_owner_locked (phys_addr_t addr, u64 size, u8 owner_id)
 
int kvm_host_prepare_stage2 (void *pgt_pool_base)
 
int kvm_guest_prepare_stage2 (struct pkvm_hyp_vm *vm, void *pgd)
 
void handle_host_mem_abort (struct kvm_cpu_context *host_ctxt)
 
int hyp_pin_shared_mem (void *from, void *to)
 
void hyp_unpin_shared_mem (void *from, void *to)
 
void reclaim_guest_pages (struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
 
int refill_memcache (struct kvm_hyp_memcache *mc, unsigned long min_pages, struct kvm_hyp_memcache *host_mc)
 
static __always_inline void __load_host_stage2 (void)
 

Variables

struct host_mmu host_mmu
 
unsigned long hyp_nr_cpus
 

Macro Definition Documentation

◆ PKVM_PAGE_STATE_PROT_MASK

#define PKVM_PAGE_STATE_PROT_MASK   (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)

Definition at line 36 of file mem_protect.h.

Enumeration Type Documentation

◆ pkvm_component_id

Enumerator
PKVM_ID_HOST 
PKVM_ID_HYP 
PKVM_ID_FFA 

Definition at line 57 of file mem_protect.h.

enum pkvm_component_id {
        PKVM_ID_HOST,
        PKVM_ID_HYP,
        PKVM_ID_FFA,
};

◆ pkvm_page_state

Enumerator
PKVM_PAGE_OWNED 
PKVM_PAGE_SHARED_OWNED 
PKVM_PAGE_SHARED_BORROWED 
__PKVM_PAGE_RESERVED 
PKVM_NOPAGE 

Definition at line 25 of file mem_protect.h.

enum pkvm_page_state {
        PKVM_PAGE_OWNED                 = 0ULL,
        PKVM_PAGE_SHARED_OWNED          = KVM_PGTABLE_PROT_SW0,
        PKVM_PAGE_SHARED_BORROWED       = KVM_PGTABLE_PROT_SW1,
        __PKVM_PAGE_RESERVED            = KVM_PGTABLE_PROT_SW0 |
                                          KVM_PGTABLE_PROT_SW1,

        /* Meta-states which aren't encoded directly in the PTE's SW bits */
        PKVM_NOPAGE,
};
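The two PTE software bits give exactly four directly-encoded states, which is why PKVM_NOPAGE lives outside the mask as a meta-state. The following standalone sketch models the encoding in plain C to show how pkvm_mkstate() and pkvm_getstate() round-trip a state through a protection value without disturbing the permission bits; the bit positions (55 and 56) are an assumption made for illustration, not taken from this header.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's KVM_PGTABLE_PROT_SW* bits;
 * the exact positions are assumed for this model. */
#define SW0        (UINT64_C(1) << 55)
#define SW1        (UINT64_C(1) << 56)
#define STATE_MASK (SW0 | SW1)

static uint64_t mkstate(uint64_t prot, uint64_t state)
{
        return (prot & ~STATE_MASK) | state;    /* mirrors pkvm_mkstate() */
}

static uint64_t getstate(uint64_t prot)
{
        return prot & STATE_MASK;               /* mirrors pkvm_getstate() */
}

int main(void)
{
        uint64_t prot = 0x7;    /* pretend R/W/X permission bits */

        prot = mkstate(prot, SW0);              /* PKVM_PAGE_SHARED_OWNED */
        assert(getstate(prot) == SW0);

        prot = mkstate(prot, SW1);              /* PKVM_PAGE_SHARED_BORROWED */
        assert(getstate(prot) == SW1);
        assert((prot & 0x7) == 0x7);            /* permissions untouched */

        printf("state bits: %#llx\n", (unsigned long long)getstate(prot));
        return 0;
}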

Function Documentation

◆ __load_host_stage2()

static __always_inline void __load_host_stage2 ( void  )
static

Definition at line 86 of file mem_protect.h.

{
        /* Before protected mode is initialized there is no host stage-2
         * to load, so install a zero VTTBR (VMID 0) instead. */
        if (static_branch_likely(&kvm_protected_mode_initialized))
                __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
        else
                write_sysreg(0, vttbr_el2);
}

◆ __pkvm_host_donate_hyp()

int __pkvm_host_donate_hyp ( u64  pfn,
u64  nr_pages 
)

Definition at line 1152 of file mem_protect.c.

{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_donation donation = {
                .tx = {
                        .nr_pages = nr_pages,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = host_addr,
                                .host = {
                                        .completer_addr = hyp_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HYP,
                        },
                },
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_donate(&donation);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}
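A hedged sketch of how the donation pair is meant to be used (the helper below is hypothetical; error handling is minimal). While donated, the range is unmapped from the host's stage-2, so the host can no longer access it:

/* Hypothetical EL2-side helper: take one page from the host, use it
 * privately, then give it back with the mirror-image call. */
static int with_private_host_page(u64 pfn)
{
        int ret;

        ret = __pkvm_host_donate_hyp(pfn, 1);   /* host -> hyp ownership */
        if (ret)
                return ret;

        /* ... the page is now hyp-owned and invisible to the host ... */

        return __pkvm_hyp_donate_host(pfn, 1);  /* hyp -> host ownership */
}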

◆ __pkvm_host_share_ffa()

int __pkvm_host_share_ffa ( u64  pfn,
u64  nr_pages 
)

Definition at line 1261 of file mem_protect.c.

{
        int ret;
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = nr_pages,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = hyp_pfn_to_phys(pfn),
                        },
                        .completer = {
                                .id = PKVM_ID_FFA,
                        },
                },
        };

        host_lock_component();
        ret = do_share(&share);
        host_unlock_component();

        return ret;
}
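Because PKVM_ID_FFA has no stage-2 of its own to update, only the host lock is taken and only the host-side annotation changes; flipping the pages to PKVM_PAGE_SHARED_OWNED is what refuses a concurrent donation or hyp-share of the same range. A hedged sketch of the expected pairing in the FF-A proxy (hypothetical helper):

/* Hypothetical: bracket a memory-share transaction forwarded to the
 * secure world, undoing the annotation once the transaction is over. */
static int ffa_transaction_example(u64 pfn, u64 nr_pages)
{
        int ret;

        ret = __pkvm_host_share_ffa(pfn, nr_pages);
        if (ret)
                return ret;

        /* ... forward the FF-A memory-share request to EL3 ... */

        return __pkvm_host_unshare_ffa(pfn, nr_pages);
}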

◆ __pkvm_host_share_hyp()

int __pkvm_host_share_hyp ( u64  pfn)

Definition at line 1086 of file mem_protect.c.

{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = 1,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = host_addr,
                                .host = {
                                        .completer_addr = hyp_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HYP,
                        },
                },
                .completer_prot = PAGE_HYP,
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_share(&share);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}
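Unlike a donation, a share leaves the host as owner (its mapping becomes PKVM_PAGE_SHARED_OWNED) while the hypervisor gains a PAGE_HYP borrower mapping of the same physical page. A hedged sketch of the round trip (hypothetical helper; in practice the two calls are driven by the host via hypercall):

/* Hypothetical: make one host page visible at EL2, then revoke it.
 * The unshare is refused while hyp code still holds a pin on the
 * page via hyp_pin_shared_mem(). */
static int share_round_trip(u64 pfn)
{
        int ret;

        ret = __pkvm_host_share_hyp(pfn);
        if (ret)
                return ret;

        /* ... EL2 can now access the page through its linear map ... */

        return __pkvm_host_unshare_hyp(pfn);
}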

◆ __pkvm_host_unshare_ffa()

int __pkvm_host_unshare_ffa ( u64  pfn,
u64  nr_pages 
)

Definition at line 1284 of file mem_protect.c.

{
        int ret;
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = nr_pages,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = hyp_pfn_to_phys(pfn),
                        },
                        .completer = {
                                .id = PKVM_ID_FFA,
                        },
                },
        };

        host_lock_component();
        ret = do_unshare(&share);
        host_unlock_component();

        return ret;
}

◆ __pkvm_host_unshare_hyp()

int __pkvm_host_unshare_hyp ( u64  pfn)

Definition at line 1119 of file mem_protect.c.

{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_share share = {
                .tx = {
                        .nr_pages = 1,
                        .initiator = {
                                .id = PKVM_ID_HOST,
                                .addr = host_addr,
                                .host = {
                                        .completer_addr = hyp_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HYP,
                        },
                },
                .completer_prot = PAGE_HYP,
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_unshare(&share);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}

◆ __pkvm_hyp_donate_host()

int __pkvm_hyp_donate_host ( u64  pfn,
u64  nr_pages 
)

Definition at line 1184 of file mem_protect.c.

{
        int ret;
        u64 host_addr = hyp_pfn_to_phys(pfn);
        u64 hyp_addr = (u64)__hyp_va(host_addr);
        struct pkvm_mem_donation donation = {
                .tx = {
                        .nr_pages = nr_pages,
                        .initiator = {
                                .id = PKVM_ID_HYP,
                                .addr = hyp_addr,
                                .hyp = {
                                        .completer_addr = host_addr,
                                },
                        },
                        .completer = {
                                .id = PKVM_ID_HOST,
                        },
                },
        };

        host_lock_component();
        hyp_lock_component();

        ret = do_donate(&donation);

        hyp_unlock_component();
        host_unlock_component();

        return ret;
}

◆ __pkvm_prot_finalize()

int __pkvm_prot_finalize ( void  )

Definition at line 289 of file mem_protect.c.

{
        struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
        struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

        if (params->hcr_el2 & HCR_VM)
                return -EPERM;

        params->vttbr = kvm_get_vttbr(mmu);
        params->vtcr = mmu->vtcr;
        params->hcr_el2 |= HCR_VM;

        /*
         * The CMO below not only cleans the updated params to the
         * PoC, but also provides the DSB that ensures ongoing
         * page-table walks that have started before we trapped to EL2
         * have completed.
         */
        kvm_flush_dcache_to_poc(params, sizeof(*params));

        write_sysreg(params->hcr_el2, hcr_el2);
        __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

        /*
         * Make sure to have an ISB before the TLB maintenance below but only
         * when __load_stage2() doesn't include one already.
         */
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

        /* Invalidate stale HCR bits that may be cached in TLBs */
        __tlbi(vmalls12e1);
        dsb(nsh);
        isb();

        return 0;
}

◆ addr_is_memory()

bool addr_is_memory ( phys_addr_t  phys)

Definition at line 378 of file mem_protect.c.

{
        struct kvm_mem_range range;

        return !!find_mem_range(phys, &range);
}

◆ handle_host_mem_abort()

void handle_host_mem_abort ( struct kvm_cpu_context *  host_ctxt)

Definition at line 529 of file mem_protect.c.

{
        struct kvm_vcpu_fault_info fault;
        u64 esr, addr;
        int ret = 0;

        esr = read_sysreg_el2(SYS_ESR);
        BUG_ON(!__get_fault_info(esr, &fault));

        addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
        ret = host_stage2_idmap(addr);
        BUG_ON(ret && ret != -EAGAIN);
}
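The address computation is terse but precise: HPFAR_EL2 carries the upper bits of the faulting IPA in its FIPA field, which starts at register bit 4 and holds the address from bit 12 upward, so masking off the low bits and shifting left by 8 re-aligns bit 4 with bit 12 and recovers the page-aligned fault address. A standalone model of that arithmetic (the field layout and mask value here are stated assumptions, not lifted from this file):

#include <stdint.h>
#include <stdio.h>

/* Assumption for this model: HPFAR_MASK clears only the low four
 * non-FIPA bits, with FIPA holding IPA bits [51:12] from bit 4 up. */
#define HPFAR_MASK (~(uint64_t)0xf)

int main(void)
{
        uint64_t ipa = 0x8badf000ULL;           /* page-aligned faulting IPA */
        uint64_t hpfar = (ipa >> 12) << 4;      /* how hardware reports it */
        uint64_t addr = (hpfar & HPFAR_MASK) << 8;

        printf("recovered IPA: %#llx\n", (unsigned long long)addr); /* 0x8badf000 */
        return 0;
}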

◆ host_stage2_idmap_locked()

int host_stage2_idmap_locked ( phys_addr_t  addr,
u64  size,
enum kvm_pgtable_prot  prot 
)

Definition at line 474 of file mem_protect.c.

{
        return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

◆ host_stage2_set_owner_locked()

int host_stage2_set_owner_locked ( phys_addr_t  addr,
u64  size,
u8  owner_id 
)

Definition at line 480 of file mem_protect.c.

{
        return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
                               addr, size, &host_s2_pool, owner_id);
}

◆ hyp_pin_shared_mem()

int hyp_pin_shared_mem ( void *  from,
void *  to 
)

Definition at line 1216 of file mem_protect.c.

{
        u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
        u64 end = PAGE_ALIGN((u64)to);
        u64 size = end - start;
        int ret;

        host_lock_component();
        hyp_lock_component();

        ret = __host_check_page_state_range(__hyp_pa(start), size,
                                            PKVM_PAGE_SHARED_OWNED);
        if (ret)
                goto unlock;

        ret = __hyp_check_page_state_range(start, size,
                                           PKVM_PAGE_SHARED_BORROWED);
        if (ret)
                goto unlock;

        for (cur = start; cur < end; cur += PAGE_SIZE)
                hyp_page_ref_inc(hyp_virt_to_page(cur));

unlock:
        hyp_unlock_component();
        host_unlock_component();

        return ret;
}
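A hedged usage sketch (hypothetical helper): EL2 code that dereferences host-shared memory brackets the access with a pin/unpin pair; the pin raises the per-page refcount that the unshare path consults, so the host cannot pull the pages out from under the reader:

/* Hypothetical: access a host-shared object of 'len' bytes safely.
 * While pinned, the host's __pkvm_host_unshare_hyp() on these pages
 * is refused until the matching unpin. */
static int use_shared_range(void *obj, size_t len)
{
        int ret;

        ret = hyp_pin_shared_mem(obj, (char *)obj + len);
        if (ret)
                return ret;     /* range wasn't in the shared/borrowed state */

        /* ... read or write the object without fear of it vanishing ... */

        hyp_unpin_shared_mem(obj, (char *)obj + len);
        return 0;
}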

◆ hyp_unpin_shared_mem()

void hyp_unpin_shared_mem ( void *  from,
void *  to 
)

Definition at line 1246 of file mem_protect.c.

{
        u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
        u64 end = PAGE_ALIGN((u64)to);

        host_lock_component();
        hyp_lock_component();

        for (cur = start; cur < end; cur += PAGE_SIZE)
                hyp_page_ref_dec(hyp_virt_to_page(cur));

        hyp_unlock_component();
        host_unlock_component();
}

◆ kvm_guest_prepare_stage2()

int kvm_guest_prepare_stage2 ( struct pkvm_hyp_vm vm,
void *  pgd 
)

Definition at line 232 of file mem_protect.c.

{
        struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
        unsigned long nr_pages;
        int ret;

        nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
        ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
        if (ret)
                return ret;

        hyp_spin_lock_init(&vm->lock);
        vm->mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_pages_exact = guest_s2_zalloc_pages_exact,
                .free_pages_exact = guest_s2_free_pages_exact,
                .zalloc_page = guest_s2_zalloc_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .page_count = hyp_page_count,
                .get_page = guest_s2_get_page,
                .put_page = guest_s2_put_page,
                .dcache_clean_inval_poc = clean_dcache_guest_page,
                .icache_inval_pou = invalidate_icache_guest_page,
        };

        guest_lock_component(vm);
        ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
                                        guest_stage2_force_pte_cb);
        guest_unlock_component(vm);
        if (ret)
                return ret;

        vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

        return 0;
}
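Note that the donated pgd region does double duty: hyp_pool_init() turns it into the per-VM allocator, and the stage-2 tables are then carved out of that pool through the guest_s2_* mm_ops. A hedged sketch of how a caller would size the donation (hypothetical helper):

/* Hypothetical: how many donated pages must back 'pgd' so that
 * hyp_pool_init() can satisfy the root-table allocation for this
 * VM's VTCR configuration. */
static unsigned long guest_pgd_pages(struct pkvm_hyp_vm *vm)
{
        u64 vtcr = vm->kvm.arch.mmu.vtcr;

        return kvm_pgtable_stage2_pgd_size(vtcr) >> PAGE_SHIFT;
}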

◆ kvm_host_prepare_stage2()

int kvm_host_prepare_stage2 ( void *  pgt_pool_base)

Definition at line 138 of file mem_protect.c.

{
        struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
        int ret;

        prepare_host_vtcr();
        hyp_spin_lock_init(&host_mmu.lock);
        mmu->arch = &host_mmu.arch;

        ret = prepare_s2_pool(pgt_pool_base);
        if (ret)
                return ret;

        ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
                                        &host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
                                        host_stage2_force_pte_cb);
        if (ret)
                return ret;

        mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
        mmu->pgt = &host_mmu.pgt;
        atomic64_set(&mmu->vmid.id, 0);

        return 0;
}

◆ pkvm_getstate()

static enum pkvm_page_state pkvm_getstate ( enum kvm_pgtable_prot  prot)
inline static

Definition at line 43 of file mem_protect.h.

{
        return prot & PKVM_PAGE_STATE_PROT_MASK;
}

◆ pkvm_mkstate()

static enum kvm_pgtable_prot pkvm_mkstate ( enum kvm_pgtable_prot  prot,
enum pkvm_page_state  state 
)
inline static

Definition at line 37 of file mem_protect.h.

{
        return (prot & ~PKVM_PAGE_STATE_PROT_MASK) | state;
}

◆ reclaim_guest_pages()

void reclaim_guest_pages ( struct pkvm_hyp_vm vm,
struct kvm_hyp_memcache *  mc 
)

Definition at line 269 of file mem_protect.c.

{
        void *addr;

        /* Dump all pgtable pages in the hyp_pool */
        guest_lock_component(vm);
        kvm_pgtable_stage2_destroy(&vm->pgt);
        vm->kvm.arch.mmu.pgd_phys = 0ULL;
        guest_unlock_component(vm);

        /* Drain the hyp_pool into the memcache */
        addr = hyp_alloc_pages(&vm->pool, 0);
        while (addr) {
                memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
                push_hyp_memcache(mc, addr, hyp_virt_to_phys);
                WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
                addr = hyp_alloc_pages(&vm->pool, 0);
        }
}

◆ refill_memcache()

int refill_memcache ( struct kvm_hyp_memcache *  mc,
unsigned long  min_pages,
struct kvm_hyp_memcache *  host_mc 
)

Definition at line 412 of file mm.c.

{
        struct kvm_hyp_memcache tmp = *host_mc;
        int ret;

        ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
                                   hyp_virt_to_phys, &tmp);
        *host_mc = tmp;

        return ret;
}
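A hedged sketch of the intended pattern (the caller and memcache names are hypothetical): before a guest mapping operation that may need page-table pages, the per-VM memcache is topped up from pages the host supplied, each of which goes through a full host-to-hyp donation on admission via admit_host_page():

/* Hypothetical EL2 caller; 'vm_mc' and 'host_mc' stand in for the real
 * per-VM and host-supplied memcaches. */
static int topup_before_map(struct kvm_hyp_memcache *vm_mc,
                            struct kvm_hyp_memcache *host_mc,
                            unsigned long min_pages)
{
        int ret;

        ret = refill_memcache(vm_mc, min_pages, host_mc);
        if (ret)
                return ret;     /* host_mc ran dry before reaching min_pages */

        /* ... proceed with the stage-2 map, allocating from vm_mc ... */
        return 0;
}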

Variable Documentation

◆ host_mmu

struct host_mmu host_mmu
extern

Definition at line 24 of file mem_protect.c.

◆ hyp_nr_cpus

unsigned long hyp_nr_cpus
extern

Definition at line 23 of file setup.c.