16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kvm_host.h>
/*
 * Bit fields of the IA32_MTRR_DEF_TYPE MSR — see Intel SDM Vol. 3A,
 * "Memory Type Range Registers".  Stripped the stray line-number
 * prefixes that had been fused into these lines.
 */
#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)	/* MTRRs globally enabled */
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)	/* fixed-range MTRRs enabled */
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)		/* default memory type field */
37 int index = (msr - MTRRphysBase_MSR(0)) / 2;
39 return &vcpu->arch.mtrr_state.var_ranges[index];
45 case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
46 case MSR_MTRRfix64K_00000:
47 case MSR_MTRRfix16K_80000:
48 case MSR_MTRRfix16K_A0000:
49 case MSR_MTRRfix4K_C0000:
50 case MSR_MTRRfix4K_C8000:
51 case MSR_MTRRfix4K_D0000:
52 case MSR_MTRRfix4K_D8000:
53 case MSR_MTRRfix4K_E0000:
54 case MSR_MTRRfix4K_E8000:
55 case MSR_MTRRfix4K_F0000:
56 case MSR_MTRRfix4K_F8000:
65 return t < 8 && (1 << t) & 0x73;
76 if (msr == MSR_MTRRdefType) {
80 }
else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
81 for (i = 0; i < 8 ; i++)
88 WARN_ON(!(msr >= MTRRphysBase_MSR(0) &&
89 msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1)));
101 return (data & mask) == 0;
132 return MTRR_TYPE_UNCACHABLE;
134 return MTRR_TYPE_WRBACK;
197 case MSR_MTRRfix64K_00000:
201 case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
203 *unit = array_index_nospec(
204 msr - MSR_MTRRfix16K_80000,
205 MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
207 case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
209 *unit = array_index_nospec(
210 msr - MSR_MTRRfix4K_C0000,
211 MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
227 WARN_ON(*
end > mtrr_seg->
end);
276 for (seg = 0; seg < seg_num; seg++) {
278 if (mtrr_seg->
start <= addr && addr < mtrr_seg->
end)
308 *
start = range->base & PAGE_MASK;
310 mask = range->mask & PAGE_MASK;
320 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
333 }
else if (msr == MSR_MTRRdefType) {
346 return (range->mask & (1 << 11)) != 0;
351 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
352 struct kvm_mtrr_range *tmp, *cur;
358 list_del(&cur->node);
371 list_for_each_entry(tmp, &mtrr_state->head, node)
372 if (cur->base >= tmp->base)
374 list_add_tail(&cur->node, &tmp->node);
387 *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
388 else if (msr == MSR_MTRRdefType)
389 vcpu->arch.mtrr_state.deftype = data;
402 if (msr == MSR_MTRRcap) {
409 *pdata = 0x500 | KVM_NR_VAR_MTRR;
418 *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
419 }
else if (msr == MSR_MTRRdefType) {
420 *pdata = vcpu->arch.mtrr_state.deftype;
436 INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
490 struct kvm_mtrr_range *range)
495 if (!(start >= iter->
end || end <= iter->start)) {
515 struct kvm_mtrr *mtrr_state = iter->
mtrr_state;
517 list_for_each_entry_continue(iter->
range, &mtrr_state->head, node)
527 struct kvm_mtrr *mtrr_state = iter->
mtrr_state;
532 iter->
range = list_prepare_entry(iter->
range, &mtrr_state->head, node);
574 struct kvm_mtrr *mtrr_state, u64 start, u64 end)
/*
 * Iterate over every memory-type region intersecting [_gpa_start_, _gpa_end_)
 * in @_mtrr_ via @_iter_: initialise the lookup once, advance with
 * mtrr_lookup_next() while mtrr_lookup_okay() reports a valid position.
 * (Stripped the stray line-number prefixes fused into the macro lines.)
 */
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
616 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
620 const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
621 | (1 << MTRR_TYPE_WRTHROUGH);
623 start = gfn_to_gpa(gfn);
644 if (type == curr_type)
651 if (curr_type == MTRR_TYPE_UNCACHABLE)
652 return MTRR_TYPE_UNCACHABLE;
658 if (((1 << type) & wt_wb_mask) &&
659 ((1 << curr_type) & wt_wb_mask)) {
660 type = MTRR_TYPE_WRTHROUGH;
670 return MTRR_TYPE_WRBACK;
693 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
698 start = gfn_to_gpa(gfn);
699 end = gfn_to_gpa(gfn + page_num);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
static bool kvm_mmu_honors_guest_mtrrs(struct kvm *kvm)
static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
static bool match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range)
static void mtrr_lookup_var_start(struct mtrr_iter *iter)
static bool is_mtrr_base_msr(unsigned int msr)
static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
static void mtrr_lookup_var_next(struct mtrr_iter *iter)
static void mtrr_lookup_start(struct mtrr_iter *iter)
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
#define IA32_MTRR_DEF_TYPE_E
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
static u64 fixed_mtrr_range_end_addr(int seg, int index)
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
static void mtrr_lookup_next(struct mtrr_iter *iter)
static bool mtrr_lookup_okay(struct mtrr_iter *iter)
static int fixed_msr_to_range_index(u32 msr)
static struct fixed_mtrr_segment fixed_seg_table[]
static struct kvm_mtrr_range * var_mtrr_msr_to_range(struct kvm_vcpu *vcpu, unsigned int msr)
static void mtrr_lookup_init(struct mtrr_iter *iter, struct kvm_mtrr *mtrr_state, u64 start, u64 end)
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type)
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
static u64 fixed_mtrr_seg_unit_size(int seg)
static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
static int fixed_mtrr_seg_end_range_index(int seg)
static bool msr_mtrr_valid(unsigned msr)
#define IA32_MTRR_DEF_TYPE_FE
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_)
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num)
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
static bool valid_mtrr_type(unsigned t)
static int fixed_mtrr_addr_to_seg(u64 addr)
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
struct kvm_mtrr_range * range
struct kvm_mtrr * mtrr_state