KVM
Macros | Enumerations | Functions | Variables
cpuid.h File Reference
#include "x86.h"
#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>
#include "governed_features.h"
Include dependency graph for cpuid.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Macros

#define KVM_GOVERNED_FEATURE(x)   KVM_GOVERNED_##x,
 
#define KVM_GOVERNED_FEATURE(x)   case x: return KVM_GOVERNED_##x;
 

Enumerations

enum  kvm_governed_features { KVM_NR_GOVERNED_FEATURES }
 

Functions

void kvm_set_cpu_caps (void)
 
void kvm_update_cpuid_runtime (struct kvm_vcpu *vcpu)
 
void kvm_update_pv_runtime (struct kvm_vcpu *vcpu)
 
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry_index (struct kvm_vcpu *vcpu, u32 function, u32 index)
 
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry (struct kvm_vcpu *vcpu, u32 function)
 
int kvm_dev_ioctl_get_cpuid (struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries, unsigned int type)
 
int kvm_vcpu_ioctl_set_cpuid (struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries)
 
int kvm_vcpu_ioctl_set_cpuid2 (struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries)
 
int kvm_vcpu_ioctl_get_cpuid2 (struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries)
 
bool kvm_cpuid (struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only)
 
u32 xstate_required_size (u64 xstate_bv, bool compacted)
 
int cpuid_query_maxphyaddr (struct kvm_vcpu *vcpu)
 
u64 kvm_vcpu_reserved_gpa_bits_raw (struct kvm_vcpu *vcpu)
 
static int cpuid_maxphyaddr (struct kvm_vcpu *vcpu)
 
static bool kvm_vcpu_is_legal_gpa (struct kvm_vcpu *vcpu, gpa_t gpa)
 
static bool kvm_vcpu_is_legal_aligned_gpa (struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment)
 
static bool page_address_valid (struct kvm_vcpu *vcpu, gpa_t gpa)
 
static __always_inline void cpuid_entry_override (struct kvm_cpuid_entry2 *entry, unsigned int leaf)
 
static __always_inline u32 * guest_cpuid_get_register (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static __always_inline bool guest_cpuid_has (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static __always_inline void guest_cpuid_clear (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static bool guest_cpuid_is_amd_or_hygon (struct kvm_vcpu *vcpu)
 
static bool guest_cpuid_is_intel (struct kvm_vcpu *vcpu)
 
static bool guest_cpuid_is_amd_compatible (struct kvm_vcpu *vcpu)
 
static bool guest_cpuid_is_intel_compatible (struct kvm_vcpu *vcpu)
 
static int guest_cpuid_family (struct kvm_vcpu *vcpu)
 
static int guest_cpuid_model (struct kvm_vcpu *vcpu)
 
static bool cpuid_model_is_consistent (struct kvm_vcpu *vcpu)
 
static int guest_cpuid_stepping (struct kvm_vcpu *vcpu)
 
static bool guest_has_spec_ctrl_msr (struct kvm_vcpu *vcpu)
 
static bool guest_has_pred_cmd_msr (struct kvm_vcpu *vcpu)
 
static bool supports_cpuid_fault (struct kvm_vcpu *vcpu)
 
static bool cpuid_fault_enabled (struct kvm_vcpu *vcpu)
 
static __always_inline void kvm_cpu_cap_clear (unsigned int x86_feature)
 
static __always_inline void kvm_cpu_cap_set (unsigned int x86_feature)
 
static __always_inline u32 kvm_cpu_cap_get (unsigned int x86_feature)
 
static __always_inline bool kvm_cpu_cap_has (unsigned int x86_feature)
 
static __always_inline void kvm_cpu_cap_check_and_set (unsigned int x86_feature)
 
static __always_inline bool guest_pv_has (struct kvm_vcpu *vcpu, unsigned int kvm_feature)
 
static __always_inline int kvm_governed_feature_index (unsigned int x86_feature)
 
static __always_inline bool kvm_is_governed_feature (unsigned int x86_feature)
 
static __always_inline void kvm_governed_feature_set (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static __always_inline void kvm_governed_feature_check_and_set (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static __always_inline bool guest_can_use (struct kvm_vcpu *vcpu, unsigned int x86_feature)
 
static bool kvm_vcpu_is_legal_cr3 (struct kvm_vcpu *vcpu, unsigned long cr3)
 

Variables

u32 kvm_cpu_caps [NR_KVM_CPU_CAPS] __read_mostly
 

Macro Definition Documentation

◆ KVM_GOVERNED_FEATURE [1/2]

#define KVM_GOVERNED_FEATURE (   x)    KVM_GOVERNED_##x,

Definition at line 242 of file cpuid.h.

◆ KVM_GOVERNED_FEATURE [2/2]

#define KVM_GOVERNED_FEATURE (   x)    case x: return KVM_GOVERNED_##x;

Definition at line 242 of file cpuid.h.

Enumeration Type Documentation

◆ kvm_governed_features

Enumerator
KVM_NR_GOVERNED_FEATURES 

Definition at line 241 of file cpuid.h.

241 enum kvm_governed_features {
242 #define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
243 #include "governed_features.h"
244  KVM_NR_GOVERNED_FEATURES
245 };
@ KVM_NR_GOVERNED_FEATURES
Definition: cpuid.h:244

Function Documentation

◆ cpuid_entry_override()

static __always_inline void cpuid_entry_override ( struct kvm_cpuid_entry2 *  entry,
unsigned int  leaf 
)
static

Definition at line 61 of file cpuid.h.

63 {
64  u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
65 
66  BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
67  *reg = kvm_cpu_caps[leaf];
68 }
static __always_inline u32 * cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, unsigned int x86_feature)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ cpuid_fault_enabled()

static bool cpuid_fault_enabled ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 191 of file cpuid.h.

192 {
193  return vcpu->arch.msr_misc_features_enables &
194  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
195 }
Here is the caller graph for this function:

◆ cpuid_maxphyaddr()

static int cpuid_maxphyaddr ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 40 of file cpuid.h.

41 {
42  return vcpu->arch.maxphyaddr;
43 }
Here is the caller graph for this function:

◆ cpuid_model_is_consistent()

static bool cpuid_model_is_consistent ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 155 of file cpuid.h.

156 {
157  return boot_cpu_data.x86_model == guest_cpuid_model(vcpu);
158 }
static int guest_cpuid_model(struct kvm_vcpu *vcpu)
Definition: cpuid.h:144
Here is the call graph for this function:
Here is the caller graph for this function:

◆ cpuid_query_maxphyaddr()

int cpuid_query_maxphyaddr ( struct kvm_vcpu *  vcpu)

Definition at line 390 of file cpuid.c.

391 {
392  struct kvm_cpuid_entry2 *best;
393 
394  best = kvm_find_cpuid_entry(vcpu, 0x80000000);
395  if (!best || best->eax < 0x80000008)
396  goto not_found;
397  best = kvm_find_cpuid_entry(vcpu, 0x80000008);
398  if (best)
399  return best->eax & 0xff;
400 not_found:
401  return 36;
402 }
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
Definition: cpuid.c:1455
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_can_use()

static __always_inline bool guest_can_use ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 278 of file cpuid.h.

280 {
281  BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
282 
283  return test_bit(kvm_governed_feature_index(x86_feature),
284  vcpu->arch.governed_features.enabled);
285 }
static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
Definition: cpuid.h:257
static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
Definition: cpuid.h:247
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_clear()

static __always_inline void guest_cpuid_clear ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 95 of file cpuid.h.

97 {
98  u32 *reg;
99 
100  reg = guest_cpuid_get_register(vcpu, x86_feature);
101  if (reg)
102  *reg &= ~__feature_bit(x86_feature);
103 }
static __always_inline u32 * guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:70
static __always_inline u32 __feature_bit(int x86_feature)
Here is the call graph for this function:

◆ guest_cpuid_family()

static int guest_cpuid_family ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 133 of file cpuid.h.

134 {
135  struct kvm_cpuid_entry2 *best;
136 
137  best = kvm_find_cpuid_entry(vcpu, 0x1);
138  if (!best)
139  return -1;
140 
141  return x86_family(best->eax);
142 }
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function)
Definition: cpuid.c:1455
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_get_register()

static __always_inline u32* guest_cpuid_get_register ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 70 of file cpuid.h.

72 {
73  const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
74  struct kvm_cpuid_entry2 *entry;
75 
76  entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index);
77  if (!entry)
78  return NULL;
79 
80  return __cpuid_entry_get_reg(entry, cpuid.reg);
81 }
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, u32 function, u32 index)
Definition: cpuid.c:1447
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
static __always_inline u32 * __cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, u32 reg)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_has()

static __always_inline bool guest_cpuid_has ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 83 of file cpuid.h.

85 {
86  u32 *reg;
87 
88  reg = guest_cpuid_get_register(vcpu, x86_feature);
89  if (!reg)
90  return false;
91 
92  return *reg & __feature_bit(x86_feature);
93 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_is_amd_compatible()

static bool guest_cpuid_is_amd_compatible ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 123 of file cpuid.h.

124 {
125  return vcpu->arch.is_amd_compatible;
126 }
Here is the caller graph for this function:

◆ guest_cpuid_is_amd_or_hygon()

static bool guest_cpuid_is_amd_or_hygon ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 105 of file cpuid.h.

106 {
107  struct kvm_cpuid_entry2 *best;
108 
109  best = kvm_find_cpuid_entry(vcpu, 0);
110  return best &&
111  (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
112  is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
113 }
static bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:428
static bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:418
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_is_intel()

static bool guest_cpuid_is_intel ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 115 of file cpuid.h.

116 {
117  struct kvm_cpuid_entry2 *best;
118 
119  best = kvm_find_cpuid_entry(vcpu, 0);
120  return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
121 }
static bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
Definition: kvm_emulate.h:411
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_is_intel_compatible()

static bool guest_cpuid_is_intel_compatible ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 128 of file cpuid.h.

129 {
130  return !guest_cpuid_is_amd_compatible(vcpu);
131 }
static bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
Definition: cpuid.h:123
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_model()

static int guest_cpuid_model ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 144 of file cpuid.h.

145 {
146  struct kvm_cpuid_entry2 *best;
147 
148  best = kvm_find_cpuid_entry(vcpu, 0x1);
149  if (!best)
150  return -1;
151 
152  return x86_model(best->eax);
153 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_cpuid_stepping()

static int guest_cpuid_stepping ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 160 of file cpuid.h.

161 {
162  struct kvm_cpuid_entry2 *best;
163 
164  best = kvm_find_cpuid_entry(vcpu, 0x1);
165  if (!best)
166  return -1;
167 
168  return x86_stepping(best->eax);
169 }
Here is the call graph for this function:

◆ guest_has_pred_cmd_msr()

static bool guest_has_pred_cmd_msr ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 179 of file cpuid.h.

180 {
181  return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
182  guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB) ||
183  guest_cpuid_has(vcpu, X86_FEATURE_SBPB));
184 }
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:83
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_has_spec_ctrl_msr()

static bool guest_has_spec_ctrl_msr ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 171 of file cpuid.h.

172 {
173  return (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) ||
174  guest_cpuid_has(vcpu, X86_FEATURE_AMD_STIBP) ||
175  guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) ||
176  guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD));
177 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ guest_pv_has()

static __always_inline bool guest_pv_has ( struct kvm_vcpu *  vcpu,
unsigned int  kvm_feature 
)
static

Definition at line 232 of file cpuid.h.

234 {
235  if (!vcpu->arch.pv_cpuid.enforce)
236  return true;
237 
238  return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
239 }
Here is the caller graph for this function:

◆ kvm_cpu_cap_check_and_set()

static __always_inline void kvm_cpu_cap_check_and_set ( unsigned int  x86_feature)
static

Definition at line 226 of file cpuid.h.

227 {
228  if (boot_cpu_has(x86_feature))
229  kvm_cpu_cap_set(x86_feature);
230 }
static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
Definition: cpuid.h:205
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_cpu_cap_clear()

static __always_inline void kvm_cpu_cap_clear ( unsigned int  x86_feature)
static

Definition at line 197 of file cpuid.h.

198 {
199  unsigned int x86_leaf = __feature_leaf(x86_feature);
200 
201  reverse_cpuid_check(x86_leaf);
202  kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
203 }
static __always_inline u32 __feature_leaf(int x86_feature)
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_cpu_cap_get()

static __always_inline u32 kvm_cpu_cap_get ( unsigned int  x86_feature)
static

Definition at line 213 of file cpuid.h.

214 {
215  unsigned int x86_leaf = __feature_leaf(x86_feature);
216 
217  reverse_cpuid_check(x86_leaf);
218  return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
219 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_cpu_cap_has()

static __always_inline bool kvm_cpu_cap_has ( unsigned int  x86_feature)
static

Definition at line 221 of file cpuid.h.

222 {
223  return !!kvm_cpu_cap_get(x86_feature);
224 }
static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
Definition: cpuid.h:213
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_cpu_cap_set()

static __always_inline void kvm_cpu_cap_set ( unsigned int  x86_feature)
static

Definition at line 205 of file cpuid.h.

206 {
207  unsigned int x86_leaf = __feature_leaf(x86_feature);
208 
209  reverse_cpuid_check(x86_leaf);
210  kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
211 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_cpuid()

bool kvm_cpuid ( struct kvm_vcpu *  vcpu,
u32 *  eax,
u32 *  ebx,
u32 *  ecx,
u32 *  edx,
bool  exact_only 
)

Definition at line 1531 of file cpuid.c.

1533 {
1534  u32 orig_function = *eax, function = *eax, index = *ecx;
1535  struct kvm_cpuid_entry2 *entry;
1536  bool exact, used_max_basic = false;
1537 
1538  entry = kvm_find_cpuid_entry_index(vcpu, function, index);
1539  exact = !!entry;
1540 
1541  if (!entry && !exact_only) {
1542  entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
1543  used_max_basic = !!entry;
1544  }
1545 
1546  if (entry) {
1547  *eax = entry->eax;
1548  *ebx = entry->ebx;
1549  *ecx = entry->ecx;
1550  *edx = entry->edx;
1551  if (function == 7 && index == 0) {
1552  u64 data;
1553  if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
1554  (data & TSX_CTRL_CPUID_CLEAR))
1555  *ebx &= ~(F(RTM) | F(HLE));
1556  } else if (function == 0x80000007) {
1557  if (kvm_hv_invtsc_suppressed(vcpu))
1558  *edx &= ~SF(CONSTANT_TSC);
1559  }
1560  } else {
1561  *eax = *ebx = *ecx = *edx = 0;
1562  /*
1563  * When leaf 0BH or 1FH is defined, CL is pass-through
1564  * and EDX is always the x2APIC ID, even for undefined
1565  * subleaves. Index 1 will exist iff the leaf is
1566  * implemented, so we pass through CL iff leaf 1
1567  * exists. EDX can be copied from any existing index.
1568  */
1569  if (function == 0xb || function == 0x1f) {
1570  entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
1571  if (entry) {
1572  *ecx = index & 0xff;
1573  *edx = entry->edx;
1574  }
1575  }
1576  }
1577  trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
1578  used_max_basic);
1579  return exact;
1580 }
#define SF(name)
Definition: cpuid.c:67
#define F
Definition: cpuid.c:64
struct kvm_cpuid_entry2 * kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, u32 function, u32 index)
Definition: cpuid.c:1447
static struct kvm_cpuid_entry2 * get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
Definition: cpuid.c:1492
static bool kvm_hv_invtsc_suppressed(struct kvm_vcpu *vcpu)
Definition: hyperv.h:299
int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated)
Definition: x86.c:1925
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_dev_ioctl_get_cpuid()

int kvm_dev_ioctl_get_cpuid ( struct kvm_cpuid2 *  cpuid,
struct kvm_cpuid_entry2 __user *  entries,
unsigned int  type 
)

Definition at line 1404 of file cpuid.c.

1407 {
1408  static const u32 funcs[] = {
1409  0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
1410  };
1411 
1412  struct kvm_cpuid_array array = {
1413  .nent = 0,
1414  };
1415  int r, i;
1416 
1417  if (cpuid->nent < 1)
1418  return -E2BIG;
1419  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1420  cpuid->nent = KVM_MAX_CPUID_ENTRIES;
1421 
1422  if (sanity_check_entries(entries, cpuid->nent, type))
1423  return -EINVAL;
1424 
1425  array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
1426  if (!array.entries)
1427  return -ENOMEM;
1428 
1429  array.maxnent = cpuid->nent;
1430 
1431  for (i = 0; i < ARRAY_SIZE(funcs); i++) {
1432  r = get_cpuid_func(&array, funcs[i], type);
1433  if (r)
1434  goto out_free;
1435  }
1436  cpuid->nent = array.nent;
1437 
1438  if (copy_to_user(entries, array.entries,
1439  array.nent * sizeof(struct kvm_cpuid_entry2)))
1440  r = -EFAULT;
1441 
1442 out_free:
1443  kvfree(array.entries);
1444  return r;
1445 }
static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, unsigned int type)
Definition: cpuid.c:1353
#define CENTAUR_CPUID_SIGNATURE
Definition: cpuid.c:1351
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, __u32 num_entries, unsigned int ioctl_type)
Definition: cpuid.c:1377
struct kvm_cpuid_entry2 * entries
Definition: cpuid.c:822
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_find_cpuid_entry()

struct kvm_cpuid_entry2* kvm_find_cpuid_entry ( struct kvm_vcpu *  vcpu,
u32  function 
)

Definition at line 1455 of file cpuid.c.

1457 {
1458  return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1459  function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
1460 }
static struct kvm_cpuid_entry2 * cpuid_entry2_find(struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
Definition: cpuid.c:82
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT
Definition: cpuid.c:80
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_find_cpuid_entry_index()

struct kvm_cpuid_entry2* kvm_find_cpuid_entry_index ( struct kvm_vcpu *  vcpu,
u32  function,
u32  index 
)

Definition at line 1447 of file cpuid.c.

1449 {
1450  return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
1451  function, index);
1452 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_governed_feature_check_and_set()

static __always_inline void kvm_governed_feature_check_and_set ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 271 of file cpuid.h.

273 {
274  if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
275  kvm_governed_feature_set(vcpu, x86_feature);
276 }
static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
Definition: cpuid.h:221
static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:262
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_governed_feature_index()

static __always_inline int kvm_governed_feature_index ( unsigned int  x86_feature)
static

Definition at line 247 of file cpuid.h.

248 {
249  switch (x86_feature) {
250 #define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
251 #include "governed_features.h"
252  default:
253  return -1;
254  }
255 }
Here is the caller graph for this function:

◆ kvm_governed_feature_set()

static __always_inline void kvm_governed_feature_set ( struct kvm_vcpu *  vcpu,
unsigned int  x86_feature 
)
static

Definition at line 262 of file cpuid.h.

264 {
265  BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
266 
267  __set_bit(kvm_governed_feature_index(x86_feature),
268  vcpu->arch.governed_features.enabled);
269 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_is_governed_feature()

static __always_inline bool kvm_is_governed_feature ( unsigned int  x86_feature)
static

Definition at line 257 of file cpuid.h.

258 {
259  return kvm_governed_feature_index(x86_feature) >= 0;
260 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_set_cpu_caps()

void kvm_set_cpu_caps ( void  )

Definition at line 585 of file cpuid.c.

586 {
587 #ifdef CONFIG_X86_64
588  unsigned int f_gbpages = F(GBPAGES);
589  unsigned int f_lm = F(LM);
590  unsigned int f_xfd = F(XFD);
591 #else
592  unsigned int f_gbpages = 0;
593  unsigned int f_lm = 0;
594  unsigned int f_xfd = 0;
595 #endif
596  memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
597 
598  BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
599  sizeof(boot_cpu_data.x86_capability));
600 
601  memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
602  sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
603 
604  kvm_cpu_cap_mask(CPUID_1_ECX,
605  /*
606  * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
607  * advertised to guests via CPUID!
608  */
609  F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
610  0 /* DS-CPL, VMX, SMX, EST */ |
611  0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
612  F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
613  F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
614  F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
615  0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
616  F(F16C) | F(RDRAND)
617  );
618  /* KVM emulates x2apic in software irrespective of host support. */
619  kvm_cpu_cap_set(X86_FEATURE_X2APIC);
620 
621  kvm_cpu_cap_mask(CPUID_1_EDX,
622  F(FPU) | F(VME) | F(DE) | F(PSE) |
623  F(TSC) | F(MSR) | F(PAE) | F(MCE) |
624  F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
625  F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
626  F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
627  0 /* Reserved, DS, ACPI */ | F(MMX) |
628  F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
629  0 /* HTT, TM, Reserved, PBE */
630  );
631 
632  kvm_cpu_cap_mask(CPUID_7_0_EBX,
633  F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
634  F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
635  F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
636  F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
637  F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
638  F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
639  F(AVX512VL));
640 
641  kvm_cpu_cap_mask(CPUID_7_ECX,
642  F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
643  F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
644  F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
645  F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
646  F(SGX_LC) | F(BUS_LOCK_DETECT)
647  );
648  /* Set LA57 based on hardware capability. */
649  if (cpuid_ecx(7) & F(LA57))
650  kvm_cpu_cap_set(X86_FEATURE_LA57);
651 
652  /*
653  * PKU not yet implemented for shadow paging and requires OSPKE
654  * to be set on the host. Clear it if that is not the case
655  */
656  if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
657  kvm_cpu_cap_clear(X86_FEATURE_PKU);
658 
659  kvm_cpu_cap_mask(CPUID_7_EDX,
660  F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
661  F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
662  F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
663  F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
664  F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
665  );
666 
667  /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
668  kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
669  kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);
670 
671  if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
672  kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
673  if (boot_cpu_has(X86_FEATURE_STIBP))
674  kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
675  if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
676  kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
677 
678  kvm_cpu_cap_mask(CPUID_7_1_EAX,
679  F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
680  F(FZRM) | F(FSRS) | F(FSRC) |
681  F(AMX_FP16) | F(AVX_IFMA) | F(LAM)
682  );
683 
684  kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
685  F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
686  F(AMX_COMPLEX)
687  );
688 
689  kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
690  F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
691  F(BHI_CTRL) | F(MCDT_NO)
692  );
693 
694  kvm_cpu_cap_mask(CPUID_D_1_EAX,
695  F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
696  );
697 
698  kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
699  SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
700  );
701 
702  kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
703  F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
704  F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
705  F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
706  0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
707  F(TOPOEXT) | 0 /* PERFCTR_CORE */
708  );
709 
710  kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
711  F(FPU) | F(VME) | F(DE) | F(PSE) |
712  F(TSC) | F(MSR) | F(PAE) | F(MCE) |
713  F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
714  F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
715  F(PAT) | F(PSE36) | 0 /* Reserved */ |
716  F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
717  F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
718  0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
719  );
720 
721  if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
722  kvm_cpu_cap_set(X86_FEATURE_GBPAGES);
723 
724  kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
725  SF(CONSTANT_TSC)
726  );
727 
728  kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
729  F(CLZERO) | F(XSAVEERPTR) |
730  F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
731  F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
732  F(AMD_PSFD)
733  );
734 
735  /*
736  * AMD has separate bits for each SPEC_CTRL bit.
737  * arch/x86/kernel/cpu/bugs.c is kind enough to
738  * record that in cpufeatures so use them.
739  */
740  if (boot_cpu_has(X86_FEATURE_IBPB))
741  kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
742  if (boot_cpu_has(X86_FEATURE_IBRS))
743  kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
744  if (boot_cpu_has(X86_FEATURE_STIBP))
745  kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
746  if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
747  kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
748  if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
749  kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
750  /*
751  * The preference is to use SPEC CTRL MSR instead of the
752  * VIRT_SPEC MSR.
753  */
754  if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
755  !boot_cpu_has(X86_FEATURE_AMD_SSBD))
756  kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
757 
758  /*
759  * Hide all SVM features by default, SVM will set the cap bits for
760  * features it emulates and/or exposes for L1.
761  */
762  kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
763 
764  kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
765  0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
766  F(SME_COHERENT));
767 
768  kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
769  F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
770  F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
771  F(WRMSR_XX_BASE_NS)
772  );
773 
774  kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
775  kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
776  kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
777 
778  kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
779  F(PERFMON_V2)
780  );
781 
782  /*
783  * Synthesize "LFENCE is serializing" into the AMD-defined entry in
784  * KVM's supported CPUID if the feature is reported as supported by the
785  * kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
786  * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
787  * CPUs that support SSE2. On CPUs that don't support AMD's leaf,
788  * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
789  * the mask with the raw host CPUID, and reporting support in AMD's
790  * leaf can make it easier for userspace to detect the feature.
791  */
792  if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
793  kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
794  if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
795  kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
796  kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
797 
798  kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
799  F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
800  F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
801  F(PMM) | F(PMM_EN)
802  );
803 
804  /*
805  * Hide RDTSCP and RDPID if either feature is reported as supported but
806  * probing MSR_TSC_AUX failed. This is purely a sanity check and
807  * should never happen, but the guest will likely crash if RDTSCP or
808  * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
809  * the past. For example, the sanity check may fire if this instance of
810  * KVM is running as L1 on top of an older, broken KVM.
811  */
812  if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
813  kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
814  !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
815  kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
816  kvm_cpu_cap_clear(X86_FEATURE_RDPID);
817  }
818 }
static __always_inline void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
Definition: cpuid.c:565
static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
Definition: cpuid.c:575
static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
Definition: cpuid.h:226
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
Definition: cpuid.h:197
bool tdp_enabled
Definition: mmu.c:106
@ CPUID_7_1_EDX
Definition: reverse_cpuid.h:16
@ CPUID_7_2_EDX
Definition: reverse_cpuid.h:19
@ CPUID_8000_0022_EAX
Definition: reverse_cpuid.h:18
@ CPUID_12_EAX
Definition: reverse_cpuid.h:15
@ NKVMCAPINTS
Definition: reverse_cpuid.h:22
@ CPUID_8000_0007_EDX
Definition: reverse_cpuid.h:17
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_update_cpuid_runtime()

void kvm_update_cpuid_runtime ( struct kvm_vcpu *  vcpu)

Definition at line 309 of file cpuid.c.

310 {
311  __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
312 }
static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries, int nent)
Definition: cpuid.c:265
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_update_pv_runtime()

void kvm_update_pv_runtime ( struct kvm_vcpu *  vcpu)

Definition at line 238 of file cpuid.c.

239 {
240  struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
241 
242  /*
243  * save the feature bitmap to avoid cpuid lookup for every PV
244  * operation
245  */
246  if (best)
247  vcpu->arch.pv_cpuid.features = best->eax;
248 }
static struct kvm_cpuid_entry2 * kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
Definition: cpuid.c:232
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_ioctl_get_cpuid2()

int kvm_vcpu_ioctl_get_cpuid2 ( struct kvm_vcpu *  vcpu,
struct kvm_cpuid2 *  cpuid,
struct kvm_cpuid_entry2 __user *  entries 
)

Definition at line 535 of file cpuid.c.

538 {
539  if (cpuid->nent < vcpu->arch.cpuid_nent)
540  return -E2BIG;
541 
542  if (copy_to_user(entries, vcpu->arch.cpuid_entries,
543  vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
544  return -EFAULT;
545 
546  cpuid->nent = vcpu->arch.cpuid_nent;
547  return 0;
548 }
Here is the caller graph for this function:

◆ kvm_vcpu_ioctl_set_cpuid()

int kvm_vcpu_ioctl_set_cpuid ( struct kvm_vcpu *  vcpu,
struct kvm_cpuid *  cpuid,
struct kvm_cpuid_entry __user *  entries 
)

Definition at line 467 of file cpuid.c.

470 {
471  int r, i;
472  struct kvm_cpuid_entry *e = NULL;
473  struct kvm_cpuid_entry2 *e2 = NULL;
474 
475  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
476  return -E2BIG;
477 
478  if (cpuid->nent) {
479  e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e));
480  if (IS_ERR(e))
481  return PTR_ERR(e);
482 
483  e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
484  if (!e2) {
485  r = -ENOMEM;
486  goto out_free_cpuid;
487  }
488  }
489  for (i = 0; i < cpuid->nent; i++) {
490  e2[i].function = e[i].function;
491  e2[i].eax = e[i].eax;
492  e2[i].ebx = e[i].ebx;
493  e2[i].ecx = e[i].ecx;
494  e2[i].edx = e[i].edx;
495  e2[i].index = 0;
496  e2[i].flags = 0;
497  e2[i].padding[0] = 0;
498  e2[i].padding[1] = 0;
499  e2[i].padding[2] = 0;
500  }
501 
502  r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
503  if (r)
504  kvfree(e2);
505 
506 out_free_cpuid:
507  kvfree(e);
508 
509  return r;
510 }
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent)
Definition: cpuid.c:414
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_ioctl_set_cpuid2()

int kvm_vcpu_ioctl_set_cpuid2 ( struct kvm_vcpu *  vcpu,
struct kvm_cpuid2 *  cpuid,
struct kvm_cpuid_entry2 __user *  entries 
)

Definition at line 512 of file cpuid.c.

515 {
516  struct kvm_cpuid_entry2 *e2 = NULL;
517  int r;
518 
519  if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
520  return -E2BIG;
521 
522  if (cpuid->nent) {
523  e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2));
524  if (IS_ERR(e2))
525  return PTR_ERR(e2);
526  }
527 
528  r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
529  if (r)
530  kvfree(e2);
531 
532  return r;
533 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_is_legal_aligned_gpa()

static bool kvm_vcpu_is_legal_aligned_gpa ( struct kvm_vcpu *  vcpu,
gpa_t  gpa,
gpa_t  alignment 
)
inlinestatic

Definition at line 50 of file cpuid.h.

52 {
53  return IS_ALIGNED(gpa, alignment) && kvm_vcpu_is_legal_gpa(vcpu, gpa);
54 }
static bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
Definition: cpuid.h:45
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_is_legal_cr3()

static bool kvm_vcpu_is_legal_cr3 ( struct kvm_vcpu *  vcpu,
unsigned long  cr3 
)
inlinestatic

Definition at line 287 of file cpuid.h.

288 {
289  if (guest_can_use(vcpu, X86_FEATURE_LAM))
290  cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
291 
292  return kvm_vcpu_is_legal_gpa(vcpu, cr3);
293 }
static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, unsigned int x86_feature)
Definition: cpuid.h:278
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_is_legal_gpa()

static bool kvm_vcpu_is_legal_gpa ( struct kvm_vcpu *  vcpu,
gpa_t  gpa 
)
inlinestatic

Definition at line 45 of file cpuid.h.

46 {
47  return !(gpa & vcpu->arch.reserved_gpa_bits);
48 }
Here is the caller graph for this function:

◆ kvm_vcpu_reserved_gpa_bits_raw()

u64 kvm_vcpu_reserved_gpa_bits_raw ( struct kvm_vcpu *  vcpu)

Definition at line 409 of file cpuid.c.

410 {
411  return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
412 }
static int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
Definition: cpuid.h:40
static __always_inline u64 rsvd_bits(int s, int e)
Definition: mmu.h:45
Here is the call graph for this function:
Here is the caller graph for this function:

◆ page_address_valid()

static bool page_address_valid ( struct kvm_vcpu *  vcpu,
gpa_t  gpa 
)
inlinestatic

Definition at line 56 of file cpuid.h.

57 {
58  return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
59 }
static bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment)
Definition: cpuid.h:50
Here is the call graph for this function:
Here is the caller graph for this function:

◆ supports_cpuid_fault()

static bool supports_cpuid_fault ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 186 of file cpuid.h.

187 {
188  return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
189 }
Here is the caller graph for this function:

◆ xstate_required_size()

u32 xstate_required_size ( u64  xstate_bv,
bool  compacted 
)

Definition at line 39 of file cpuid.c.

40 {
41  int feature_bit = 0;
42  u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
43 
44  xstate_bv &= XFEATURE_MASK_EXTEND;
45  while (xstate_bv) {
46  if (xstate_bv & 0x1) {
47  u32 eax, ebx, ecx, edx, offset;
48  cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
49  /* ECX[1]: 64B alignment in compacted form */
50  if (compacted)
51  offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
52  else
53  offset = ebx;
54  ret = max(ret, offset + eax);
55  }
56 
57  xstate_bv >>= 1;
58  feature_bit++;
59  }
60 
61  return ret;
62 }
#define feature_bit(name)
Here is the caller graph for this function:

Variable Documentation

◆ __read_mostly

u32 kvm_cpu_caps [NR_KVM_CPU_CAPS] __read_mostly
extern

Definition at line 36 of file cpuid.c.