KVM
tdp_mmu.h File Reference
#include <linux/kvm_host.h>
#include "spte.h"


Functions

void kvm_mmu_init_tdp_mmu (struct kvm *kvm)
 
void kvm_mmu_uninit_tdp_mmu (struct kvm *kvm)
 
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa (struct kvm_vcpu *vcpu)
 
static __must_check bool kvm_tdp_mmu_get_root (struct kvm_mmu_page *root)
 
void kvm_tdp_mmu_put_root (struct kvm *kvm, struct kvm_mmu_page *root)
 
bool kvm_tdp_mmu_zap_leafs (struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
 
bool kvm_tdp_mmu_zap_sp (struct kvm *kvm, struct kvm_mmu_page *sp)
 
void kvm_tdp_mmu_zap_all (struct kvm *kvm)
 
void kvm_tdp_mmu_invalidate_all_roots (struct kvm *kvm)
 
void kvm_tdp_mmu_zap_invalidated_roots (struct kvm *kvm)
 
int kvm_tdp_mmu_map (struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
bool kvm_tdp_mmu_unmap_gfn_range (struct kvm *kvm, struct kvm_gfn_range *range, bool flush)
 
bool kvm_tdp_mmu_age_gfn_range (struct kvm *kvm, struct kvm_gfn_range *range)
 
bool kvm_tdp_mmu_test_age_gfn (struct kvm *kvm, struct kvm_gfn_range *range)
 
bool kvm_tdp_mmu_set_spte_gfn (struct kvm *kvm, struct kvm_gfn_range *range)
 
bool kvm_tdp_mmu_wrprot_slot (struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level)
 
bool kvm_tdp_mmu_clear_dirty_slot (struct kvm *kvm, const struct kvm_memory_slot *slot)
 
void kvm_tdp_mmu_clear_dirty_pt_masked (struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot)
 
void kvm_tdp_mmu_zap_collapsible_sptes (struct kvm *kvm, const struct kvm_memory_slot *slot)
 
bool kvm_tdp_mmu_write_protect_gfn (struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level)
 
void kvm_tdp_mmu_try_split_huge_pages (struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level, bool shared)
 
static void kvm_tdp_mmu_walk_lockless_begin (void)
 
static void kvm_tdp_mmu_walk_lockless_end (void)
 
int kvm_tdp_mmu_get_walk (struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 
u64 * kvm_tdp_mmu_fast_pf_get_last_sptep (struct kvm_vcpu *vcpu, u64 addr, u64 *spte)
 
static bool is_tdp_mmu_page (struct kvm_mmu_page *sp)
 

Function Documentation

◆ is_tdp_mmu_page()

static bool is_tdp_mmu_page ( struct kvm_mmu_page *  sp)
inline static

Definition at line 74 of file tdp_mmu.h.

74 { return false; }
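The body shown above returns false unconditionally, which corresponds to the stub used when the TDP MMU is compiled out (!CONFIG_X86_64 builds). For comparison, a minimal sketch of what the CONFIG_X86_64 variant presumably looks like, assuming struct kvm_mmu_page carries a tdp_mmu_page flag (that field is an assumption, not shown on this page):

static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp)
{
	/* Assumed flag: set on shadow pages allocated by the TDP MMU. */
	return sp->tdp_mmu_page;
}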

◆ kvm_mmu_init_tdp_mmu()

void kvm_mmu_init_tdp_mmu ( struct kvm *  kvm)

Definition at line 15 of file tdp_mmu.c.

16 {
17  INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18  spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
19 }

◆ kvm_mmu_uninit_tdp_mmu()

void kvm_mmu_uninit_tdp_mmu ( struct kvm *  kvm)

Definition at line 33 of file tdp_mmu.c.

34 {
35  /*
36  * Invalidate all roots, which besides the obvious, schedules all roots
37  * for zapping and thus puts the TDP MMU's reference to each root, i.e.
38  * ultimately frees all roots.
39  */
40  kvm_tdp_mmu_invalidate_all_roots(kvm);
41  kvm_tdp_mmu_zap_invalidated_roots(kvm);
42 
43  WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44  WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
45 
46  /*
47  * Ensure that all the outstanding RCU callbacks to free shadow pages
48  * can run before the VM is torn down. Putting the last reference to
49  * zapped roots will create new callbacks.
50  */
51  rcu_barrier();
52 }
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
Definition: tdp_mmu.c:856
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
Definition: tdp_mmu.c:901

◆ kvm_tdp_mmu_age_gfn_range()

bool kvm_tdp_mmu_age_gfn_range ( struct kvm *  kvm,
struct kvm_gfn_range *  range 
)

Definition at line 1195 of file tdp_mmu.c.

1196 {
1197  return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1198 }
static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
Definition: tdp_mmu.c:1161
static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm, struct kvm_gfn_range *range, tdp_handler_t handler)
Definition: tdp_mmu.c:1129
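kvm_tdp_mmu_age_gfn_range(), kvm_tdp_mmu_test_age_gfn() and kvm_tdp_mmu_set_spte_gfn() all funnel through kvm_tdp_mmu_handle_gfn() with a per-SPTE callback whose shape matches the helpers listed above. A minimal sketch of that pattern with a hypothetical no-op handler; the handler signature is inferred from age_gfn_range() above, and the boolean result is ORed across SPTEs (e.g. "young" for the aging hooks, "flush needed" for set_spte_gfn):

/* Hypothetical handler: called once per leaf SPTE in the range. */
static bool noop_handler(struct kvm *kvm, struct tdp_iter *iter,
			 struct kvm_gfn_range *range)
{
	/* Inspect iter->gfn / iter->old_spte here. */
	return false;
}

static bool my_gfn_range_hook(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm_tdp_mmu_handle_gfn(kvm, range, noop_handler);
}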

◆ kvm_tdp_mmu_clear_dirty_pt_masked()

void kvm_tdp_mmu_clear_dirty_pt_masked ( struct kvm *  kvm,
struct kvm_memory_slot *  slot,
gfn_t  gfn,
unsigned long  mask,
bool  wrprot 
)

Definition at line 1629 of file tdp_mmu.c.

1633 {
1634  struct kvm_mmu_page *root;
1635 
1636  for_each_tdp_mmu_root(kvm, root, slot->as_id)
1637  clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1638 }
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot)
Definition: tdp_mmu.c:1581
#define for_each_tdp_mmu_root(_kvm, _root, _as_id)
Definition: tdp_mmu.c:174
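The mask selects which of the 64 pages starting at gfn are affected: bit N covers gfn + N, and wrprot chooses between write-protecting the SPTEs and clearing their dirty bits. A minimal sketch of clearing dirty state for two pages; base_gfn and mask are illustrative, and taking mmu_lock for write is an assumption based on KVM's dirty-log clearing path:

gfn_t base_gfn = slot->base_gfn;	/* first GFN covered by this bitmap word */
unsigned long mask = 0x3;		/* pages base_gfn and base_gfn + 1 */

write_lock(&kvm->mmu_lock);
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot, base_gfn, mask,
				  true /* write-protect instead of clearing D-bits */);
write_unlock(&kvm->mmu_lock);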

◆ kvm_tdp_mmu_clear_dirty_slot()

bool kvm_tdp_mmu_clear_dirty_slot ( struct kvm *  kvm,
const struct kvm_memory_slot *  slot 
)

Definition at line 1560 of file tdp_mmu.c.

1562 {
1563  struct kvm_mmu_page *root;
1564  bool spte_set = false;
1565 
1566  lockdep_assert_held_read(&kvm->mmu_lock);
1567  for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1568  spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1569  slot->base_gfn + slot->npages);
1570 
1571  return spte_set;
1572 }
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)
Definition: tdp_mmu.c:159
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end)
Definition: tdp_mmu.c:1518

◆ kvm_tdp_mmu_fast_pf_get_last_sptep()

u64* kvm_tdp_mmu_fast_pf_get_last_sptep ( struct kvm_vcpu *  vcpu,
u64  addr,
u64 *  spte 
)

Definition at line 1795 of file tdp_mmu.c.

1797 {
1798  struct tdp_iter iter;
1799  struct kvm_mmu *mmu = vcpu->arch.mmu;
1800  gfn_t gfn = addr >> PAGE_SHIFT;
1801  tdp_ptep_t sptep = NULL;
1802 
1803  tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1804  *spte = iter.old_spte;
1805  sptep = iter.sptep;
1806  }
1807 
1808  /*
1809  * Perform the rcu_dereference to get the raw spte pointer value since
1810  * we are passing it up to fast_page_fault, which is shared with the
1811  * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1812  * annotation.
1813  *
1814  * This is safe since fast_page_fault obeys the contracts of this
1815  * function as well as all TDP MMU contracts around modifying SPTEs
1816  * outside of mmu_lock.
1817  */
1818  return rcu_dereference(sptep);
1819 }
u64 __rcu * tdp_ptep_t
Definition: mmu_internal.h:50
#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)
Definition: tdp_mmu.c:631
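As the comment above notes, the returned pointer is only protected by RCU, so lookups must be bracketed with kvm_tdp_mmu_walk_lockless_begin()/kvm_tdp_mmu_walk_lockless_end() (documented below). A minimal usage sketch; vcpu and fault_addr stand in for the caller's context:

u64 spte;
u64 *sptep;

kvm_tdp_mmu_walk_lockless_begin();
sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault_addr, &spte);
if (sptep && is_shadow_present_pte(spte)) {
	/* e.g. attempt a cmpxchg-based fix-up on *sptep, as
	 * fast_page_fault() in mmu.c does. */
}
kvm_tdp_mmu_walk_lockless_end();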

◆ kvm_tdp_mmu_get_root()

static __must_check bool kvm_tdp_mmu_get_root ( struct kvm_mmu_page *  root)
inline static

Definition at line 15 of file tdp_mmu.h.

16 {
17  return refcount_inc_not_zero(&root->tdp_mmu_root_count);
18 }
refcount_t tdp_mmu_root_count
Definition: mmu_internal.h:102
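refcount_inc_not_zero() fails once the count has already dropped to zero, i.e. when the root is being freed, so callers must check the return value and balance a successful get with kvm_tdp_mmu_put_root(). A minimal sketch of the pin/unpin pattern; the surrounding context is illustrative:

/* Pin @root before using it outside a path that already owns a
 * reference; bail out if the root is already on its way to being freed. */
if (kvm_tdp_mmu_get_root(root)) {
	/* ... walk the paging structure rooted at root->spt ... */
	kvm_tdp_mmu_put_root(kvm, root);
}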

◆ kvm_tdp_mmu_get_vcpu_root_hpa()

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa ( struct kvm_vcpu *  vcpu)

Definition at line 219 of file tdp_mmu.c.

220 {
221  union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
222  struct kvm *kvm = vcpu->kvm;
223  struct kvm_mmu_page *root;
224 
225  lockdep_assert_held_write(&kvm->mmu_lock);
226 
227  /*
228  * Check for an existing root before allocating a new one. Note, the
229  * role check prevents consuming an invalid root.
230  */
231  for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
232  if (root->role.word == role.word &&
233  kvm_tdp_mmu_get_root(root))
234  goto out;
235  }
236 
237  root = tdp_mmu_alloc_sp(vcpu);
238  tdp_mmu_init_sp(root, NULL, 0, role);
239 
240  /*
241  * TDP MMU roots are kept until they are explicitly invalidated, either
242  * by a memslot update or by the destruction of the VM. Initialize the
243  * refcount to two; one reference for the vCPU, and one reference for
244  * the TDP MMU itself, which is held until the root is invalidated and
245  * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
246  */
247  refcount_set(&root->tdp_mmu_root_count, 2);
248 
249  spin_lock(&kvm->arch.tdp_mmu_pages_lock);
250  list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
251  spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
252 
253 out:
254  return __pa(root->spt);
255 }
static int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
Definition: mmu_internal.h:138
union kvm_mmu_page_role role
Definition: mmu_internal.h:80
struct list_head link
Definition: mmu_internal.h:57
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role)
Definition: tdp_mmu.c:190
static struct kvm_mmu_page * tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
Definition: tdp_mmu.c:180
static __must_check bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
Definition: tdp_mmu.h:15

◆ kvm_tdp_mmu_get_walk()

int kvm_tdp_mmu_get_walk ( struct kvm_vcpu *  vcpu,
u64  addr,
u64 *  sptes,
int *  root_level 
)

Definition at line 1766 of file tdp_mmu.c.

1768 {
1769  struct tdp_iter iter;
1770  struct kvm_mmu *mmu = vcpu->arch.mmu;
1771  gfn_t gfn = addr >> PAGE_SHIFT;
1772  int leaf = -1;
1773 
1774  *root_level = vcpu->arch.mmu->root_role.level;
1775 
1776  tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1777  leaf = iter.level;
1778  sptes[leaf] = iter.old_spte;
1779  }
1780 
1781  return leaf;
1782 }
int level
Definition: tdp_iter.h:101
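The walk fills sptes[] indexed by paging level, from the root level reported through *root_level down to the returned leaf level, and returns -1 if nothing was walked. A sketch of how a caller might consume the result; the function name and the PT64_ROOT_MAX_LEVEL sizing are assumptions, and the walk is bracketed with the lockless helpers documented below:

static void dump_tdp_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf, level;

	kvm_tdp_mmu_walk_lockless_begin();
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();

	if (leaf < 0)
		return;	/* no SPTEs mapped for this address */

	for (level = root_level; level >= leaf; level--)
		pr_info("level %d: spte 0x%llx\n", level, sptes[level]);
}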

◆ kvm_tdp_mmu_invalidate_all_roots()

void kvm_tdp_mmu_invalidate_all_roots ( struct kvm *  kvm)

Definition at line 901 of file tdp_mmu.c.

902 {
903  struct kvm_mmu_page *root;
904 
905  /*
906  * mmu_lock must be held for write to ensure that a root doesn't become
907  * invalid while there are active readers (invalidating a root while
908  * there are active readers may or may not be problematic in practice,
909  * but it's uncharted territory and not supported).
910  *
911  * Waive the assertion if there are no users of @kvm, i.e. the VM is
912  * being destroyed after all references have been put, or if no vCPUs
913  * have been created (which means there are no roots), i.e. the VM is
914  * being destroyed in an error path of KVM_CREATE_VM.
915  */
916  if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
917  refcount_read(&kvm->users_count) && kvm->created_vcpus)
918  lockdep_assert_held_write(&kvm->mmu_lock);
919 
920  /*
921  * As above, mmu_lock isn't held when destroying the VM! There can't
922  * be other references to @kvm, i.e. nothing else can invalidate roots
923  * or get/put references to roots.
924  */
925  list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
926  /*
927  * Note, invalid roots can outlive a memslot update! Invalid
928  * roots must be *zapped* before the memslot update completes,
929  * but a different task can acquire a reference and keep the
930  * root alive after its been zapped.
931  */
932  if (!root->role.invalid) {
933  root->tdp_mmu_scheduled_root_to_zap = true;
934  root->role.invalid = true;
935  }
936  }
937 }
bool tdp_mmu_scheduled_root_to_zap
Definition: mmu_internal.h:66

◆ kvm_tdp_mmu_map()

int kvm_tdp_mmu_map ( struct kvm_vcpu *  vcpu,
struct kvm_page_fault *  fault 
)

Definition at line 1032 of file tdp_mmu.c.

1033 {
1034  struct kvm_mmu *mmu = vcpu->arch.mmu;
1035  struct kvm *kvm = vcpu->kvm;
1036  struct tdp_iter iter;
1037  struct kvm_mmu_page *sp;
1038  int ret = RET_PF_RETRY;
1039 
1040  kvm_mmu_hugepage_adjust(vcpu, fault);
1041 
1042  trace_kvm_mmu_spte_requested(fault);
1043 
1044  rcu_read_lock();
1045 
1046  tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1047  int r;
1048 
1049  if (fault->nx_huge_page_workaround_enabled)
1050  disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1051 
1052  /*
1053  * If SPTE has been frozen by another thread, just give up and
1054  * retry, avoiding unnecessary page table allocation and free.
1055  */
1056  if (is_removed_spte(iter.old_spte))
1057  goto retry;
1058 
1059  if (iter.level == fault->goal_level)
1060  goto map_target_level;
1061 
1062  /* Step down into the lower level page table if it exists. */
1063  if (is_shadow_present_pte(iter.old_spte) &&
1064  !is_large_pte(iter.old_spte))
1065  continue;
1066 
1067  /*
1068  * The SPTE is either non-present or points to a huge page that
1069  * needs to be split.
1070  */
1071  sp = tdp_mmu_alloc_sp(vcpu);
1072  tdp_mmu_init_child_sp(sp, &iter);
1073 
1073 
1074  sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1075 
1076  if (is_shadow_present_pte(iter.old_spte))
1077  r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1078  else
1079  r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1080 
1081  /*
1082  * Force the guest to retry if installing an upper level SPTE
1083  * failed, e.g. because a different task modified the SPTE.
1084  */
1085  if (r) {
1086  tdp_mmu_free_sp(sp);
1087  goto retry;
1088  }
1089 
1090  if (fault->huge_page_disallowed &&
1091  fault->req_level >= iter.level) {
1092  spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1093  if (sp->nx_huge_page_disallowed)
1094  track_possible_nx_huge_page(kvm, sp);
1095  spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1096  }
1097  }
1098 
1099  /*
1100  * The walk aborted before reaching the target level, e.g. because the
1101  * iterator detected an upper level SPTE was frozen during traversal.
1102  */
1103  WARN_ON_ONCE(iter.level == fault->goal_level);
1104  goto retry;
1105 
1106 map_target_level:
1107  ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
1108 
1109 retry:
1110  rcu_read_unlock();
1111  return ret;
1112 }
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
Definition: mmu.c:3180
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
Definition: mmu.c:848
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
Definition: mmu.c:3216
@ RET_PF_RETRY
Definition: mmu_internal.h:275
static bool is_shadow_present_pte(u64 pte)
Definition: spte.h:258
static bool is_removed_spte(u64 spte)
Definition: spte.h:202
static bool is_large_pte(u64 pte)
Definition: spte.h:313
bool nx_huge_page_disallowed
Definition: mmu_internal.h:74
bool huge_page_disallowed
Definition: mmu_internal.h:212
const bool nx_huge_page_workaround_enabled
Definition: mmu_internal.h:206
static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, struct tdp_iter *iter)
Definition: tdp_mmu.c:205
static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *sp, bool shared)
Definition: tdp_mmu.c:1006
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, struct tdp_iter *iter)
Definition: tdp_mmu.c:943
static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter, struct kvm_mmu_page *sp, bool shared)
Definition: tdp_mmu.c:1376
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
Definition: tdp_mmu.c:54

◆ kvm_tdp_mmu_put_root()

void kvm_tdp_mmu_put_root ( struct kvm *  kvm,
struct kvm_mmu_page *  root 
)

Definition at line 76 of file tdp_mmu.c.

77 {
78  if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
79  return;
80 
81  /*
82  * The TDP MMU itself holds a reference to each root until the root is
83  * explicitly invalidated, i.e. the final reference should be never be
84  * put for a valid root.
85  */
86  KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
87 
88  spin_lock(&kvm->arch.tdp_mmu_pages_lock);
89  list_del_rcu(&root->link);
90  spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
91  call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
92 }
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
Definition: tdp_mmu.c:68
static bool is_tdp_mmu_page(struct kvm_mmu_page *sp)
Definition: tdp_mmu.h:74

◆ kvm_tdp_mmu_set_spte_gfn()

bool kvm_tdp_mmu_set_spte_gfn ( struct kvm *  kvm,
struct kvm_gfn_range *  range 
)

Definition at line 1247 of file tdp_mmu.c.

1248 {
1249  /*
1250  * No need to handle the remote TLB flush under RCU protection, the
1251  * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1252  * shadow page. See the WARN on pfn_changed in handle_changed_spte().
1253  */
1254  return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1255 }
static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
Definition: tdp_mmu.c:1211

◆ kvm_tdp_mmu_test_age_gfn()

bool kvm_tdp_mmu_test_age_gfn ( struct kvm *  kvm,
struct kvm_gfn_range *  range 
)

Definition at line 1206 of file tdp_mmu.c.

1207 {
1208  return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1209 }
static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter, struct kvm_gfn_range *range)
Definition: tdp_mmu.c:1200

◆ kvm_tdp_mmu_try_split_huge_pages()

void kvm_tdp_mmu_try_split_huge_pages ( struct kvm *  kvm,
const struct kvm_memory_slot *  slot,
gfn_t  start,
gfn_t  end,
int  target_level,
bool  shared 
)

Definition at line 1483 of file tdp_mmu.c.

1487 {
1488  struct kvm_mmu_page *root;
1489  int r = 0;
1490 
1491  kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1492  for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id) {
1493  r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1494  if (r) {
1495  kvm_tdp_mmu_put_root(kvm, root);
1496  break;
1497  }
1498  }
1499 }
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
Definition: tdp_mmu.c:76
static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm, bool shared)
Definition: tdp_mmu.c:22
static int tdp_mmu_split_huge_pages_root(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, int target_level, bool shared)
Definition: tdp_mmu.c:1414
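A sketch of eager page splitting across a whole memslot, e.g. ahead of dirty logging: split down to 4KiB with shared == true, i.e. with mmu_lock held for read, which is consistent with the kvm_lockdep_assert_mmu_lock_held() call above. The locking shown is an assumption:

read_lock(&kvm->mmu_lock);
kvm_tdp_mmu_try_split_huge_pages(kvm, slot, slot->base_gfn,
				 slot->base_gfn + slot->npages,
				 PG_LEVEL_4K, true);
read_unlock(&kvm->mmu_lock);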

◆ kvm_tdp_mmu_unmap_gfn_range()

bool kvm_tdp_mmu_unmap_gfn_range ( struct kvm *  kvm,
struct kvm_gfn_range *  range,
bool  flush 
)

Definition at line 1114 of file tdp_mmu.c.

1116 {
1117  struct kvm_mmu_page *root;
1118 
1119  __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
1120  flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1121  range->may_block, flush);
1122 
1123  return flush;
1124 }
static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, bool can_yield, bool flush)
Definition: tdp_mmu.c:780
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)
Definition: tdp_mmu.c:152

◆ kvm_tdp_mmu_walk_lockless_begin()

static void kvm_tdp_mmu_walk_lockless_begin ( void  )
inline static

Definition at line 56 of file tdp_mmu.h.

57 {
58  rcu_read_lock();
59 }

◆ kvm_tdp_mmu_walk_lockless_end()

static void kvm_tdp_mmu_walk_lockless_end ( void  )
inline static

Definition at line 61 of file tdp_mmu.h.

62 {
63  rcu_read_unlock();
64 }

◆ kvm_tdp_mmu_write_protect_gfn()

bool kvm_tdp_mmu_write_protect_gfn ( struct kvm *  kvm,
struct kvm_memory_slot *  slot,
gfn_t  gfn,
int  min_level 
)

Definition at line 1746 of file tdp_mmu.c.

1749 {
1750  struct kvm_mmu_page *root;
1751  bool spte_set = false;
1752 
1753  lockdep_assert_held_write(&kvm->mmu_lock);
1754  for_each_tdp_mmu_root(kvm, root, slot->as_id)
1755  spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1756 
1757  return spte_set;
1758 }
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, int min_level)
Definition: tdp_mmu.c:1710

◆ kvm_tdp_mmu_wrprot_slot()

bool kvm_tdp_mmu_wrprot_slot ( struct kvm *  kvm,
const struct kvm_memory_slot *  slot,
int  min_level 
)

Definition at line 1300 of file tdp_mmu.c.

1302 {
1303  struct kvm_mmu_page *root;
1304  bool spte_set = false;
1305 
1306  lockdep_assert_held_read(&kvm->mmu_lock);
1307 
1308  for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1309  spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1310  slot->base_gfn + slot->npages, min_level);
1311 
1312  return spte_set;
1313 }
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, int min_level)
Definition: tdp_mmu.c:1262
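A sketch of how this might be driven when dirty logging is enabled for a memslot: write-protect every mapping down to 4KiB granularity with mmu_lock held for read (per the lockdep assertion in the listing) and flush remote TLBs if any SPTE was changed. The locking and flush calls are assumptions, not taken from this page:

bool flush;

read_lock(&kvm->mmu_lock);
flush = kvm_tdp_mmu_wrprot_slot(kvm, slot, PG_LEVEL_4K);
read_unlock(&kvm->mmu_lock);

if (flush)
	kvm_flush_remote_tlbs(kvm);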

◆ kvm_tdp_mmu_zap_all()

void kvm_tdp_mmu_zap_all ( struct kvm *  kvm)

Definition at line 831 of file tdp_mmu.c.

832 {
833  struct kvm_mmu_page *root;
834 
835  /*
836  * Zap all roots, including invalid roots, as all SPTEs must be dropped
837  * before returning to the caller. Zap directly even if the root is
838  * also being zapped by a worker. Walking zapped top-level SPTEs isn't
839  * all that expensive and mmu_lock is already held, which means the
840  * worker has yielded, i.e. flushing the work instead of zapping here
841  * isn't guaranteed to be any faster.
842  *
843  * A TLB flush is unnecessary, KVM zaps everything if and only the VM
844  * is being destroyed or the userspace VMM has exited. In both cases,
845  * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
846  */
847  lockdep_assert_held_write(&kvm->mmu_lock);
848  for_each_tdp_mmu_root_yield_safe(kvm, root)
849  tdp_mmu_zap_root(kvm, root, false);
850 }
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)
Definition: tdp_mmu.c:162
static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared)
Definition: tdp_mmu.c:716

◆ kvm_tdp_mmu_zap_collapsible_sptes()

void kvm_tdp_mmu_zap_collapsible_sptes ( struct kvm *  kvm,
const struct kvm_memory_slot *  slot 
)

Definition at line 1695 of file tdp_mmu.c.

1697 {
1698  struct kvm_mmu_page *root;
1699 
1700  lockdep_assert_held_read(&kvm->mmu_lock);
1701  for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
1702  zap_collapsible_spte_range(kvm, root, slot);
1703 }
static void zap_collapsible_spte_range(struct kvm *kvm, struct kvm_mmu_page *root, const struct kvm_memory_slot *slot)
Definition: tdp_mmu.c:1640

◆ kvm_tdp_mmu_zap_invalidated_roots()

void kvm_tdp_mmu_zap_invalidated_roots ( struct kvm *  kvm)

Definition at line 856 of file tdp_mmu.c.

857 {
858  struct kvm_mmu_page *root;
859 
860  read_lock(&kvm->mmu_lock);
861 
862  for_each_tdp_mmu_root_yield_safe(kvm, root) {
863  if (!root->tdp_mmu_scheduled_root_to_zap)
864  continue;
865 
866  root->tdp_mmu_scheduled_root_to_zap = false;
867  KVM_BUG_ON(!root->role.invalid, kvm);
868 
869  /*
870  * A TLB flush is not necessary as KVM performs a local TLB
871  * flush when allocating a new root (see kvm_mmu_load()), and
872  * when migrating a vCPU to a different pCPU. Note, the local
873  * TLB flush on reuse also invalidates paging-structure-cache
874  * entries, i.e. TLB entries for intermediate paging structures,
875  * that may be zapped, as such entries are associated with the
876  * ASID on both VMX and SVM.
877  */
878  tdp_mmu_zap_root(kvm, root, true);
879 
880  /*
881  * The referenced needs to be put *after* zapping the root, as
882  * the root must be reachable by mmu_notifiers while it's being
883  * zapped
884  */
885  kvm_tdp_mmu_put_root(kvm, root);
886  }
887 
888  read_unlock(&kvm->mmu_lock);
889 }

◆ kvm_tdp_mmu_zap_leafs()

bool kvm_tdp_mmu_zap_leafs ( struct kvm *  kvm,
gfn_t  start,
gfn_t  end,
bool  flush 
)

Definition at line 820 of file tdp_mmu.c.

821 {
822  struct kvm_mmu_page *root;
823 
824  lockdep_assert_held_write(&kvm->mmu_lock);
825  for_each_tdp_mmu_root_yield_safe(kvm, root)
826  flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
827 
828  return flush;
829 }

◆ kvm_tdp_mmu_zap_sp()

bool kvm_tdp_mmu_zap_sp ( struct kvm *  kvm,
struct kvm_mmu_page *  sp 
)

Definition at line 752 of file tdp_mmu.c.

753 {
754  u64 old_spte;
755 
756  /*
757  * This helper intentionally doesn't allow zapping a root shadow page,
758  * which doesn't have a parent page table and thus no associated entry.
759  */
760  if (WARN_ON_ONCE(!sp->ptep))
761  return false;
762 
763  old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
764  if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
765  return false;
766 
767  tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
768  sp->gfn, sp->role.level + 1);
769 
770  return true;
771 }
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
Definition: mmu_internal.h:143
tdp_ptep_t ptep
Definition: mmu_internal.h:107
static u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
Definition: tdp_iter.h:17
static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, u64 old_spte, u64 new_spte, gfn_t gfn, int level)
Definition: tdp_mmu.c:592