#ifndef __KVM_X86_MMU_TDP_ITER_H
#define __KVM_X86_MMU_TDP_ITER_H

#include <linux/kvm_host.h>

#include "mmu.h"
#include "spte.h"
/* Read the SPTE at sptep under RCU protection; READ_ONCE() avoids torn reads. */
static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
	return READ_ONCE(*rcu_dereference(sptep));
}

/* Atomically replace the SPTE and return the value that was replaced. */
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
	return xchg(rcu_dereference(sptep), new_spte);
}

/* Non-atomic write, for SPTEs that do not need an atomic update. */
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
	WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
/*
 * An SPTE must be modified atomically if it is shadow-present, a leaf SPTE,
 * and has volatile bits, i.e. bits that can be set outside of mmu_lock
 * (e.g. Accessed and Dirty bits set by hardware).
 */
static inline bool kvm_tdp_mmu_spte_need_atomic_write(u64 old_spte, int level)
{
	return is_shadow_present_pte(old_spte) &&
	       is_last_spte(old_spte, level) &&
	       spte_has_volatile_bits(old_spte);
}

static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
					 u64 new_spte, int level)
{
	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level))
		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);

	__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return old_spte;
}

static inline u64 tdp_mmu_clear_spte_bits(tdp_ptep_t sptep, u64 old_spte,
					  u64 mask, int level)
{
	atomic64_t *sptep_atomic;

	if (kvm_tdp_mmu_spte_need_atomic_write(old_spte, level)) {
		sptep_atomic = (atomic64_t *)rcu_dereference(sptep);
		return (u64)atomic64_fetch_and(~mask, sptep_atomic);
	}

	__kvm_tdp_mmu_write_spte(sptep, old_spte & ~mask);
	return old_spte;
}
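/*
 * Illustrative sketch, not part of tdp_iter.h: two minimal callers of the
 * write helpers above. The helper names and the choice of PT_WRITABLE_MASK
 * are assumptions made for this example; real update flows in tdp_mmu.c also
 * handle accounting, dirty logging and TLB flushes.
 */
static inline u64 zap_spte_example(tdp_ptep_t sptep, u64 old_spte, int level)
{
	/*
	 * Install a non-present (zero) SPTE. The atomic path returns the
	 * value that was actually replaced, capturing racing A/D updates.
	 */
	return kvm_tdp_mmu_write_spte(sptep, old_spte, 0, level);
}

static inline u64 wrprot_spte_example(tdp_ptep_t sptep, u64 old_spte, int level)
{
	/* Clear only the Writable bit, leaving the rest of the SPTE intact. */
	return tdp_mmu_clear_spte_bits(sptep, old_spte, PT_WRITABLE_MASK, level);
}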
/*
 * A TDP iterator (struct tdp_iter, full definition not shown here) performs
 * a pre-order walk over a TDP paging structure. Its fields include the page
 * tables traversed to reach the current SPTE
 * (tdp_ptep_t pt_path[PT64_ROOT_MAX_LEVEL]) and the GFN the walk is heading
 * toward (gfn_t next_last_level_gfn).
 */

/*
 * Iterates over every SPTE mapping the GFN range [start, end) in a
 * pre-order traversal, without descending below min_level.
 */
#define for_each_tdp_pte_min_level(iter, root, min_level, start, end)	\
	for (tdp_iter_start(&iter, root, min_level, start);		\
	     iter.valid && iter.gfn < end;				\
	     tdp_iter_next(&iter))

#define for_each_tdp_pte(iter, root, start, end) \
	for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
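/*
 * Illustrative sketch, not part of tdp_iter.h: how a caller typically walks
 * the leaf SPTEs covering [start, end) under one root, in the style of the
 * users in tdp_mmu.c. count_present_example() is a hypothetical helper;
 * real callers also hold mmu_lock as required.
 */
static unsigned long count_present_example(struct kvm_mmu_page *root,
					   gfn_t start, gfn_t end)
{
	struct tdp_iter iter;
	unsigned long present = 0;

	rcu_read_lock();
	for_each_tdp_pte(iter, root, start, end) {
		/* Skip non-present entries and non-leaf (table) entries. */
		if (!is_shadow_present_pte(iter.old_spte) ||
		    !is_last_spte(iter.old_spte, iter.level))
			continue;

		present++;
	}
	rcu_read_unlock();

	return present;
}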
tdp_ptep_t spte_to_child_pt(u64 pte, int level);

void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
		    int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
void tdp_iter_restart(struct tdp_iter *iter);

#endif /* __KVM_X86_MMU_TDP_ITER_H */
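/*
 * Illustrative sketch, not part of tdp_iter.h: the open-coded equivalent of
 * for_each_tdp_pte_min_level(), restricted to 2MiB and higher levels, using
 * tdp_iter_start()/tdp_iter_next() directly. walk_huge_example() is a
 * hypothetical helper.
 */
static void walk_huge_example(struct kvm_mmu_page *root, gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	for (tdp_iter_start(&iter, root, PG_LEVEL_2M, start);
	     iter.valid && iter.gfn < end;
	     tdp_iter_next(&iter)) {
		/*
		 * iter.gfn, iter.level, iter.sptep and iter.old_spte describe
		 * the current entry of the pre-order walk; no entry below
		 * PG_LEVEL_2M is visited.
		 */
	}
}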