|
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644)
__MODULE_PARM_TYPE(nx_huge_pages, "bool")
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_param_ops, &nx_huge_pages_recovery_ratio, 0644)
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint")
module_param_cb(nx_huge_pages_recovery_period_ms, &nx_huge_pages_recovery_param_ops, &nx_huge_pages_recovery_period_ms, 0644)
__MODULE_PARM_TYPE(nx_huge_pages_recovery_period_ms, "uint")
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644)
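
The nx_huge_pages knobs above are ordinary module parameters routed through custom get/set callbacks. A minimal sketch of the wiring, assuming the standard kernel_param_ops pattern (only the names that already appear in the listing are taken from it):

    /*
     * Sketch only: hook the module parameter up to the get/set handlers
     * listed above via kernel_param_ops.  The real initializer in mmu.c
     * may differ in detail.
     */
    static const struct kernel_param_ops nx_huge_pages_ops = {
            .set = set_nx_huge_pages,
            .get = get_nx_huge_pages,
    };

    module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
    __MODULE_PARM_TYPE(nx_huge_pages, "bool");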
|
static void mmu_spte_set(u64 *sptep, u64 spte)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE)
BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57)
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX)
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA)
BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp)
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse)
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep)
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap)
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke)
BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57)
BUILD_MMU_ROLE_ACCESSOR(base, efer, nx)
BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma)
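
The two accessor-generating macros above stamp out one-line predicates for each tracked CR0/CR4/EFER bit: one family tests the raw register snapshot, the other reads the value cached in the MMU role. A simplified sketch of what each expansion might look like (attributes and exact field names are assumptions, not copied from mmu.c):

    /* ____is_cr0_pg(regs), ____is_cr4_smep(regs), ...: test the raw register bit. */
    #define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)                            \
    static inline bool ____is_##reg##_##name(const struct kvm_mmu_role_regs *regs)   \
    {                                                                                \
            return !!(regs->reg & (flag));                                           \
    }

    /* is_cr0_wp(mmu), is_cr4_smep(mmu), ...: read the bit cached in the MMU role. */
    #define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name)                          \
    static inline bool is_##reg##_##name(struct kvm_mmu *mmu)                        \
    {                                                                                \
            return !!(mmu->cpu_role.base_or_ext.reg##_##name);                       \
    }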
|
static bool is_cr0_pg(struct kvm_mmu *mmu)
static bool is_cr4_pae(struct kvm_mmu *mmu)
static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
static unsigned long get_guest_cr3(struct kvm_vcpu *vcpu)
static unsigned long kvm_mmu_get_guest_pgd(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static bool kvm_available_flush_remote_tlbs_range(void)
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, unsigned int access)
static gfn_t get_mmio_spte_gfn(u64 spte)
static unsigned get_mmio_spte_access(u64 spte)
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
static int is_cpuid_PSE36(void)
static void count_spte_clear(u64 *sptep, u64 spte)
static void __set_spte(u64 *sptep, u64 spte)
static void __update_clear_spte_fast(u64 *sptep, u64 spte)
static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
static u64 __get_spte_lockless(u64 *sptep)
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
static void mmu_spte_clear_no_track(u64 *sptep)
static u64 mmu_spte_get_lockless(u64 *sptep)
static bool mmu_spte_age(u64 *sptep)
static bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
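
walk_shadow_page_lockless_begin()/end() bracket the lockless shadow-walks used by fast_page_fault() and get_walk(). The usage pattern is roughly the sketch below; the loop body is illustrative only:

    walk_shadow_page_lockless_begin(vcpu);

    for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
            sptes[iterator.level - 1] = spte;       /* record the SPTE at each level */
            if (!is_shadow_present_pte(spte))
                    break;                          /* stop at the first non-present entry */
    }

    walk_shadow_page_lockless_end(vcpu);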
|
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
static bool sp_has_gptes(struct kvm_mmu_page *sp)
static u32 kvm_mmu_page_get_access(struct kvm_mmu_page *sp, int index)
static void kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, unsigned int access)
static void kvm_mmu_page_set_access(struct kvm_mmu_page *sp, int index, unsigned int access)
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, const struct kvm_memory_slot *slot, int level)
static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, gfn_t gfn, int count)
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, bool nx_huge_page_possible)
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)
static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, struct kvm_rmap_head *rmap_head)
static void pte_list_desc_remove_entry(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct pte_list_desc *desc, int i)
static void pte_list_remove(struct kvm *kvm, u64 *spte, struct kvm_rmap_head *rmap_head)
static void kvm_zap_one_rmap_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, u64 *sptep)
static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot)
static void rmap_remove(struct kvm *kvm, u64 *spte)
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, struct rmap_iterator *iter)
static u64 *rmap_get_next(struct rmap_iterator *iter)
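
rmap_get_first()/rmap_get_next() are normally consumed through a small iteration wrapper; the pattern looks roughly like the following sketch, reusing names from the signatures above (rmap_write_protect() is the model here):

    struct rmap_iterator iter;
    u64 *sptep;
    bool flush = false;

    /* Visit every SPTE chained off this rmap head. */
    for (sptep = rmap_get_first(rmap_head, &iter); sptep;
         sptep = rmap_get_next(&iter)) {
            if (spte_write_protect(sptep, pt_protect))
                    flush = true;
    }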
|
static void drop_spte(struct kvm *kvm, u64 *sptep)
static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
static bool spte_write_protect(u64 *sptep, bool pt_protect)
static bool rmap_write_protect(struct kvm_rmap_head *rmap_head, bool pt_protect)
static bool spte_clear_dirty(u64 *sptep)
static bool spte_wrprot_for_clear_dirty(u64 *sptep)
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
int kvm_cpu_dirty_log_size(void)
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level)
static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t pte)
static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator, const struct kvm_memory_slot *slot, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn)
static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
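
slot_rmap_walk_init()/okay()/next() form the iterator behind the per-memslot rmap walks. A rough sketch of the conventional wrapping macro and one use of it (field and constant names are assumptions based on the signatures above):

    #define for_each_slot_rmap_range(slot, start_level, end_level,            \
                                     start_gfn, end_gfn, iter)                \
            for (slot_rmap_walk_init(iter, slot, start_level, end_level,      \
                                     start_gfn, end_gfn);                     \
                 slot_rmap_walk_okay(iter);                                   \
                 slot_rmap_walk_next(iter))

    /* Apply a handler to every 4K rmap head covered by the slot. */
    struct slot_rmap_walk_iterator iterator;

    for_each_slot_rmap_range(slot, PG_LEVEL_4K, PG_LEVEL_4K,
                             slot->base_gfn, slot->base_gfn + slot->npages - 1,
                             &iterator)
            flush |= fn(kvm, iterator.rmap, slot);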
|
static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, rmap_handler_t handler)
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused)
static void __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)
static void rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
static void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
static unsigned kvm_page_table_hashfn(gfn_t gfn)
static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache, struct kvm_mmu_page *sp, u64 *parent_pte)
static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte)
static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *parent_pte)
static void mark_unsync(u64 *spte)
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, int idx)
static void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
static int __mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec)
static int mmu_unsync_walk(struct kvm_mmu_page *sp, struct kvm_mmu_pages *pvec)
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static void kvm_mmu_commit_zap_page(struct kvm *kvm, struct list_head *invalid_list)
static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static int kvm_sync_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, struct list_head *invalid_list, bool remote_flush)
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
static int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents, int i)
static int mmu_pages_first(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents)
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
static int mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *parent, bool can_yield)
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
static void clear_sp_write_flooding_count(u64 *spte)
static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role)
static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role)
static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role)
static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, gfn_t gfn, union kvm_mmu_page_role role)
static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access)
static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, bool direct, unsigned int access)
static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, hpa_t root, u64 addr)
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, struct kvm_vcpu *vcpu, u64 addr)
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte)
static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
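
shadow_walk_init()/okay()/next() implement the top-down walk of the shadow page tables that direct_map() and the prefetch paths rely on. Usage is roughly the sketch below (the wrapper macro and loop body are illustrative):

    /* Conventional wrapper around the three iterator helpers above (sketch). */
    #define for_each_shadow_entry(vcpu, addr, walker)          \
            for (shadow_walk_init(&(walker), vcpu, addr);      \
                 shadow_walk_okay(&(walker));                  \
                 shadow_walk_next(&(walker)))

    struct kvm_shadow_walk_iterator it;

    for_each_shadow_entry(vcpu, fault->addr, it) {
            if (it.level == fault->goal_level)
                    break;          /* leaf level reached; install the final SPTE */
            /* ...otherwise look up or link the child shadow page for it.sptep... */
    }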
|
static void __link_shadow_page(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, u64 *sptep, struct kvm_mmu_page *sp, bool flush)
static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, struct kvm_mmu_page *sp)
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned direct_access)
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)
static int kvm_mmu_page_unlink_children(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list)
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *parent, struct list_head *invalid_list)
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, struct list_head *invalid_list, int *nr_zapped)
static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, unsigned long nr_to_zap)
static unsigned long kvm_mmu_available_pages(struct kvm *kvm)
static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch)
static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault)
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *start, u64 *end)
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *sptep)
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot)
static int __kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level, bool is_private)
int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level)
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access)
static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, u64 *sptep, u64 old_spte, u64 new_spte)
static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, struct list_head *invalid_list)
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, ulong roots_to_free)
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots)
|
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots)
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level)
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
static int mmu_first_shadow_root_alloc(struct kvm *kvm)
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
static bool is_unsync_root(hpa_t root)
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t vaddr, u64 access, struct x86_exception *exception)
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn)
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
static u8 kvm_max_level_for_order(int order)
static void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, unsigned int access)
static bool is_page_fault_stale(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len)
EXPORT_SYMBOL_GPL(kvm_handle_page_fault)
bool __kvm_mmu_honors_guest_mtrrs(bool vm_has_noncoherent_dma)
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
static void nonpaging_init_context(struct kvm_mmu *context)
static bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, union kvm_mmu_page_role role)
static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role)
static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role)
static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu, gpa_t new_pgd, union kvm_mmu_page_role new_role)
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd)
|
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access)
static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, int level, bool nx, bool gbpages, bool pse, bool amd)
static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, u64 pa_bits_rsvd, bool execonly, int huge_page_level)
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, struct kvm_mmu *context, bool execonly, int huge_page_level)
static u64 reserved_hpa_bits(void)
static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
static bool boot_cpu_is_amd(void)
static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
static void reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
static void update_pkru_bitmask(struct kvm_mmu *mmu)
static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static void paging64_init_context(struct kvm_mmu *context)
static void paging32_init_context(struct kvm_mmu *context)
static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
static int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context, union kvm_cpu_role cpu_role, union kvm_mmu_page_role root_role)
static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3)
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu)
static union kvm_cpu_role kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, bool execonly, u8 level)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp)
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu)
static void init_kvm_softmmu(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role)
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu, union kvm_cpu_role new_mode)
void kvm_init_mmu(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_init_mmu)
|
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context)
int kvm_mmu_load(struct kvm_vcpu *vcpu)
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, int *bytes)
static bool detect_write_flooding(struct kvm_mmu_page *sp)
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, int bytes)
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes)
int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, void *insn, int insn_len)
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault)
static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, hpa_t root_hpa)
void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, u64 addr, unsigned long roots)
EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr)
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg)
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level, int tdp_max_root_level, int tdp_huge_page_level)
EXPORT_SYMBOL_GPL(kvm_configure_mmu)
static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield, bool flush)
static __always_inline bool walk_slot_rmaps(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, int start_level, int end_level, bool flush_on_yield)
static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, const struct kvm_memory_slot *slot, slot_rmaps_handler fn, bool flush_on_yield)
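
walk_slot_rmaps() and walk_slot_rmaps_4k() are thin conveniences over __walk_slot_rmaps(); a sketch of how the 4K variant might narrow the generic walker (the parameter forwarding is an assumption based on the signatures above):

    static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
                                                   const struct kvm_memory_slot *slot,
                                                   slot_rmaps_handler fn,
                                                   bool flush_on_yield)
    {
            /* Restrict the walk to 4K mappings only. */
            return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K,
                                   flush_on_yield);
    }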
|
static void free_mmu_pages(struct kvm_mmu *mmu)
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
int kvm_mmu_create(struct kvm_vcpu *vcpu)
static void kvm_zap_obsolete_pages(struct kvm *kvm)
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
void kvm_mmu_init_vm(struct kvm *kvm)
static void mmu_free_vm_memory_caches(struct kvm *kvm)
void kvm_mmu_uninit_vm(struct kvm *kvm)
static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
static bool slot_rmap_write_protect(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, const struct kvm_memory_slot *memslot, int start_level)
static bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
static bool need_topup_split_caches_or_resched(struct kvm *kvm)
static int topup_split_caches(struct kvm *kvm)
static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep)
static void shadow_mmu_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep)
static int shadow_mmu_try_split_huge_page(struct kvm *kvm, const struct kvm_memory_slot *slot, u64 *huge_sptep)
static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level)
void kvm_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, u64 start, u64 end, int target_level)
void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *memslot, int target_level)
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, struct kvm_rmap_head *rmap_head, const struct kvm_memory_slot *slot)
static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *slot)
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, const struct kvm_memory_slot *memslot)
static void kvm_mmu_zap_all(struct kvm *kvm)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
static unsigned long mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
static void mmu_destroy_caches(void)
static bool get_nx_auto_mode(void)
static void __set_nx_huge_pages(bool val)
void __init kvm_mmu_x86_module_init(void)
int kvm_mmu_vendor_module_init(void)
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
void kvm_mmu_vendor_module_exit(void)
static bool calc_nx_huge_pages_recovery_period(uint *period)
static void kvm_recover_nx_huge_pages(struct kvm *kvm)
static long get_nx_huge_page_recovery_timeout(u64 start_time)
static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data)
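
calc_nx_huge_pages_recovery_period() reconciles the two recovery parameters: when nx_huge_pages_recovery_period_ms is zero, a period is derived from nx_huge_pages_recovery_ratio instead. A hedged sketch of that derivation (the exact constants and clamping are assumptions):

    static bool calc_nx_huge_pages_recovery_period(uint *period)
    {
            uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);

            *period = READ_ONCE(nx_huge_pages_recovery_period_ms);

            if (!*period && !ratio)
                    return false;   /* recovery is disabled entirely */

            if (!*period) {
                    /* Derive a period from the ratio, e.g. spread an hour across it. */
                    ratio = min(ratio, 3600u);
                    *period = 60 * 60 * 1000 / ratio;
            }

            return true;
    }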
|
int kvm_mmu_post_init_vm(struct kvm *kvm)
void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
|