#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#ifdef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
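/*
 * Illustrative note (added commentary, not kernel source): on 32-bit
 * builds a plain '%' on u64 operands would require libgcc's __umoddi3,
 * so the CONFIG_X86_64 guard above derives the remainder from
 * div64_u64() instead. The timer code uses it to fold the remaining
 * time into the current period, roughly:
 *
 *	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 */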
/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION 0x14UL
#define LAPIC_MMIO_LENGTH (1 << 12)

#define MAX_APIC_VECTOR 256
#define APIC_VECTORS_PER_REG 32

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT 1000
#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
	*((u32 *) (regs + reg_off)) = val;
	BUILD_BUG_ON(reg != APIC_ICR);
	return *((u64 *) (regs + reg));

	BUILD_BUG_ON(reg != APIC_ICR);
	*((u64 *) (regs + reg)) = val;
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
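/*
 * Illustrative note (added commentary, not kernel source): the IRR, ISR
 * and TMR are 256-bit registers stored as eight 32-bit words spaced
 * 0x10 apart, so the helpers address them as:
 *
 *	VEC_POS(v) = (v) & 31          bit within the 32-bit word
 *	REG_POS(v) = ((v) >> 5) << 4   byte offset of that word
 */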
#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
	return apic->vcpu->vcpu_id;
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
	return ((id >> 4) << 16) | (1 << (id & 0xf));
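/*
 * Worked example (added commentary, not kernel source): for x2APIC id
 * 0x23 the logical cluster is 0x23 >> 4 = 2 (bits 31:16 of the LDR) and
 * the in-cluster bit is 1 << (0x23 & 0xf) = 1 << 3, so the computed LDR
 * is 0x00020008.
 */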
static bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->logical_mode) {
	case KVM_APIC_MODE_SW_DISABLED:
		/* Arbitrarily use the flat map so that @cluster isn't NULL. */
		*cluster = map->xapic_flat_map;
		*mask = 0;
		return true;
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}
		return true;
	}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	case KVM_APIC_MODE_MAP_DISABLED:
		return false;
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
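/*
 * Illustrative note (added commentary, not kernel source): x2APIC
 * logical IDs pack 16 APICs per cluster, with dest_id[31:16] selecting
 * the cluster and dest_id[15:0] acting as a bitmask inside it, which is
 * why the cluster's base slot in phys_map[] is (dest_id >> 16) * 16.
 */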
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);
static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)

	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))

	if (x2apic_id > new->max_apic_id)

	*xapic_id_mismatch = true;

	if (vcpu->kvm->arch.x2apic_format) {

		new->phys_map[x2apic_id] = apic;

		new->phys_map[xapic_id] = apic;

		physical_id = x2apic_id;

		physical_id = xapic_id;

	if (new->phys_map[physical_id])

	new->phys_map[physical_id] = apic;
static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{
	enum kvm_apic_logical_mode logical_mode;

	if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		return;

		logical_mode = KVM_APIC_MODE_X2APIC;

	ldr = GET_APIC_LOGICAL_ID(ldr);

		logical_mode = KVM_APIC_MODE_XAPIC_FLAT;

		logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;

	if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
		new->logical_mode = logical_mode;
	} else if (new->logical_mode != logical_mode) {
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
		return;
	}

	new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;

	if (!is_power_of_2(mask) || cluster[ldr])
		new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;

	bool xapic_id_mismatch;

	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);

	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	xapic_id_mismatch = false;
	kvm_for_each_vcpu(i, vcpu, kvm)

	new = kvzalloc(sizeof(struct kvm_apic_map) +
		       sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
		       GFP_KERNEL_ACCOUNT);

	new->max_apic_id = max_id;
	new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

	kvm_for_each_vcpu(i, vcpu, kvm) {
	/*
	 * The optimized map is effectively KVM's internal version of APICv,
	 * and all unwanted aliasing that results in disabling the optimized
	 * map also applies to APICv.
	 */
	if (!new)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

	if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

	if (xapic_id_mismatch)
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
	else
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);

	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);

	kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;

	v |= APIC_LVR_DIRECTED_EOI;

	return __fls(*reg) + vec;

	count += hweight32(*reg);
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

		irr_val = *p_irr;
		pir_val = READ_ONCE(pir[i]);

		if (pir_val) {
			pir_val = xchg(&pir[i], 0);

			prev_irr_val = irr_val;
			do {
				irr_val = prev_irr_val | pir_val;
			} while (prev_irr_val != irr_val &&
				 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

			if (prev_irr_val != irr_val)
				max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
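/*
 * Illustrative note (added commentary, not kernel source): pending bits
 * are pulled out of the posted-interrupt descriptor with xchg() and
 * merged into the IRR with a try_cmpxchg() retry loop so that bits set
 * concurrently by hardware are never lost; the function returns true
 * iff the highest newly-set bit is also the highest bit in the IRR.
 */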
	ASSERT(result == -1 || result >= 16);

	static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
						    apic_find_highest_irr(apic));

	static_call_cond(kvm_x86_hwapic_isr_update)(vec);

	ASSERT(result == -1 || result >= 16);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
				 irq->level, irq->trig_mode, dest_map);
static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	map = rcu_dereference(kvm->arch.apic_map);
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;

	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	val &= KVM_PV_EOI_ENABLED;

	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

	if (kvm_x86_ops.sync_pir_to_irr)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)

	u32 tpr, isrv, ppr, old_ppr;

	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))

	return ppr < old_ppr;
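/*
 * Worked example (added commentary, not kernel source): the PPR is the
 * higher of the TPR and the in-service vector's priority class (the
 * high nibble). With TPR = 0x45 and ISRV = 0x61, (0x45 & 0xf0) is less
 * than (0x61 & 0xf0), so PPR becomes 0x60 and only vectors of a higher
 * priority class than 6 can be delivered.
 */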
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
		return ((logical_id >> 16) == (mda >> 16)) &&
		       (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4)) &&
		       (logical_id & mda & 0xf) != 0;
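/*
 * Worked example (added commentary, not kernel source): in xAPIC
 * cluster mode an MDA of 0x23 targets cluster 2 with bitmask 0x3. An
 * APIC whose logical_id is 0x21 (cluster 2, bit 0) matches, because
 * (0x21 >> 4) == (0x23 >> 4) and (0x21 & 0x23 & 0xf) == 1.
 */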
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)

	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)

	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
			const unsigned long *bitmap, u32 bitmap_size)

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}
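/*
 * Illustrative note (added commentary, not kernel source): this is the
 * vector-hashing path for lowest-priority delivery. Taking
 * vector % dest_vcpus and walking that many set bits into the bitmap
 * spreads vectors across the candidate vCPUs while keeping a given
 * vector pinned to the same destination.
 */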
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info("Disabled LAPIC found during irq injection\n");
	}
static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
			struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
		     map->logical_mode != KVM_APIC_MODE_X2APIC))
			return true;
	} else {
		if (irq->dest_id == (x2apic_ipi ?
				     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

static bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

		for_each_set_bit(i, bitmap, 16) {

			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						       (*dst)[lowest]->vcpu) < 0)
				lowest = i;

		if (!(*dst)[lowest]) {

		*bitmap = (lowest >= 0) ? 1 << lowest : 0;
	struct kvm_apic_map *map;
	unsigned long bitmap;

	if (irq->shorthand == APIC_DEST_SELF) {
		if (KVM_BUG_ON(!src, kvm)) {

	map = rcu_dereference(kvm->arch.apic_map);

	for_each_set_bit(i, &bitmap, 16) {

bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
				  struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;

	map = rcu_dereference(kvm->arch.apic_map);

	    hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		*dest_vcpu = dst[i]->vcpu;
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

			__set_bit(vcpu->vcpu_id, dest_map->map);

			kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
		else
			kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR);

		static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
						       trig_mode, vector);

		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);

		if (!trig_mode || level) {

			kvm_make_request(KVM_REQ_EVENT, vcpu);

	case APIC_DM_STARTUP:

		kvm_make_request(KVM_REQ_EVENT, vcpu);

	case APIC_DM_EXTINT:

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap, i;

	map = rcu_dereference(kvm->arch.apic_map);

	for_each_set_bit(i, &bitmap, 16) {

		vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
		__set_bit(vcpu_idx, vcpu_bitmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {

		__set_bit(i, vcpu_bitmap);
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;

	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);

		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);

	trace_kvm_eoi(apic, vector);

	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);

	trace_kvm_eoi(apic, vector);

	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	struct kvm_lapic_irq irq;

	WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);
	ktime_t remaining, now;

	if (ktime_to_ns(remaining) < 0)

	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);

	run->tpr_access.is_write = write;

	if (apic->vcpu->arch.tpr_access_reporting)
#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
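/*
 * Worked example (added commentary, not kernel source): registers sit
 * at 16-byte strides, so APIC_REG_MASK(APIC_TMICT) is
 * 1ull << (0x380 >> 4), i.e. bit 0x38. Multiplying a single register
 * bit by ((1ull << count) - 1) replicates it over count consecutive
 * registers: APIC_REGS_MASK(APIC_ISR, 8) sets bits 16..23 for the
 * eight ISR words starting at offset 0x100.
 */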
	u64 valid_reg_mask =

	return valid_reg_mask;

	unsigned char alignment = offset & 0xf;

	if (alignment + len > 4)

	if (offset > 0x3f0 ||

		memcpy(data, (char *)&result + alignment, len);

		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1,2, or 4 instead\n", len);
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			  gpa_t address, int len, void *data)

	    KVM_X86_QUIRK_LAPIC_MMIO_HOLE))

		memset(data, 0xff, len);
	u32 tmp1, tmp2, tdcr;

	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
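/*
 * Worked example (added commentary, not kernel source, and assuming the
 * usual divide_count = 0x1 << (tmp2 & 0x7) that follows): the timer
 * divide value lives in TDCR bits 0, 1 and 3. TDCR = 0xb gives
 * tmp2 = (0x3 | (0x8 >> 1)) + 1 = 8, hence divide-by-1; TDCR = 0 gives
 * tmp2 = 1, hence divide-by-2.
 */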
		pr_info_ratelimited(
		    "vcpu %i: requested %lld ns "
		    "lapic timer period limited to %lld ns\n",
		    apic->vcpu->vcpu_id,

	     APIC_LVT_TIMER_TSCDEADLINE)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

			bitmap = apic->regs + APIC_IRR;
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));

		u64 delay_ns = guest_cycles * 1000000ULL;
		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
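/*
 * Illustrative note (added commentary, not kernel source): the
 * conversion above is cycles / (tsc_khz * 1000) seconds, i.e.
 * cycles * 10^6 / tsc_khz nanoseconds, which is exactly
 * delay_ns = guest_cycles * 1000000ULL / virtual_tsc_khz.
 */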
static void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
				       s64 advance_expire_delta)

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
		/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}
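/*
 * Illustrative note (added commentary, not kernel source): this is the
 * adaptive tuning used when lapic_timer_advance_dynamic is set. Each
 * expiry nudges timer_advance_ns by an eighth of the observed error
 * (LAPIC_TIMER_ADVANCE_ADJUST_STEP == 8), and deltas outside the
 * ADJUST_MIN..ADJUST_MAX window are treated as noise and ignored.
 */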
	u64 guest_tsc, tsc_deadline;

	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

	if (lapic_timer_advance_dynamic) {

	if (guest_tsc < tsc_deadline)

	if (guest_tsc < tsc_deadline)

	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	struct kvm_vcpu *vcpu = apic->vcpu;

	if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns)

	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	ns = (tscdeadline - guest_tsc) * 1000000ULL;
	do_div(ns, this_tsc_khz);

	if (likely(tscdeadline > guest_tsc) &&
	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	}

	local_irq_restore(flags);
	ktime_t now, remaining;
	u64 ns_remaining_old, ns_remaining_new;

	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
					   apic->divide_count, old_divisor);
	if (unlikely(count_reg != APIC_TMICT)) {

		if (unlikely(deadline <= 0)) {

			pr_info_ratelimited(
			    "vcpu %i: requested lapic timer restore with "
			    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
			    "Using initial count to start timer.\n",
			    apic->vcpu->vcpu_id,

	ktime_t now = ktime_get();

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {

			HRTIMER_MODE_ABS_HARD);
	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;

	WARN_ON(preemptible());

	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);

	struct kvm_vcpu *vcpu = apic->vcpu;

	WARN_ON(preemptible());

	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
		return false;

	hrtimer_cancel(&ktimer->timer);

		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
	WARN_ON(preemptible());

	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);

	WARN_ON(kvm_vcpu_is_blocking(vcpu));

		atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);

		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	if (reg == APIC_LVTCMCI)
		return LVT_CMCI;
	if (reg < APIC_LVTT || reg > APIC_LVTERR)
		return -1;
	return array_index_nospec(
			(reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);

		mask |= APIC_SPIV_DIRECTED_EOI;

	if (!(val & APIC_SPIV_APIC_ENABLED)) {

		val &= ~APIC_ICR_BUSY;

	case APIC_LVTCMCI: {

			val |= APIC_LVT_MASKED;

			val |= APIC_LVT_MASKED;
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, const void *data)

	    KVM_X86_QUIRK_LAPIC_MMIO_HOLE))

	if (len != 4 || (offset & 0xf))

	if (!vcpu->arch.apic)

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))

	free_page((unsigned long)apic->regs);
	return (tpr & 0xf0) >> 4;

	u64 old_value = vcpu->arch.apic_base;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {

			kvm_make_request(KVM_REQ_APF_READY, vcpu);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	if ((old_value ^ value) & X2APIC_ENABLE) {
		if (value & X2APIC_ENABLE)
			kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
		else if (value & MSR_IA32_APICBASE_ENABLE)
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
		kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
		static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
	}

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		kvm_set_apicv_inhibit(apic->vcpu->kvm,
				      APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_memslot_enabled ||
	    kvm->arch.apic_access_memslot_inhibited)
		goto out;

	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);

	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page)) {
		ret = -EFAULT;
		goto out;
	}

	kvm->arch.apic_access_memslot_enabled = true;

	mutex_unlock(&kvm->slots_lock);
	struct kvm *kvm = vcpu->kvm;

	if (!kvm->arch.apic_access_memslot_enabled)
		return;

	kvm_vcpu_srcu_read_unlock(vcpu);

	mutex_lock(&kvm->slots_lock);

	if (kvm->arch.apic_access_memslot_enabled) {

		kvm->arch.apic_access_memslot_enabled = false;

		kvm->arch.apic_access_memslot_inhibited = true;

	mutex_unlock(&kvm->slots_lock);

	kvm_vcpu_srcu_read_lock(vcpu);
	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
	if (kvm_vcpu_is_reset_bsp(vcpu))
		msr_val |= MSR_IA32_APICBASE_BSP;

	kvm_lapic_set_reg(apic, APIC_LVT0,
			  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));

	for (i = 0; i < 8; i++) {

	vcpu->arch.pv_eoi.msr_val = 0;

	static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
	static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
	static_call_cond(kvm_x86_hwapic_isr_update)(-1);

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;
	int vector, mode, trig_mode;

	vector = reg & APIC_VECTOR_MASK;
	mode = reg & APIC_MODE_MASK;
	trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;

	if (r && lvt_type == APIC_LVTPC &&
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)

		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;

	return HRTIMER_NORESTART;
	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);

	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;

	vcpu->arch.apic = NULL;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)

		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;

	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);

	static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apic_arb_prio = 0;

	struct hrtimer *timer;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
	trace_kvm_pv_eoi(apic, vector);

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))

	int max_irr, max_isr;

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))

	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

				      &vcpu->arch.apic->vapic_cache,

		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);

		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
		data &= ~APIC_ICR_BUSY;

	if (reg == APIC_ICR) {

	if (reg == APIC_ICR)

	u32 reg = (msr - APIC_BASE_MSR) << 4;

	u32 reg = (msr - APIC_BASE_MSR) << 4;
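/*
 * Worked example (added commentary, not kernel source): x2APIC MSRs
 * shadow the xAPIC MMIO registers at (msr - APIC_BASE_MSR) << 4, so
 * MSR 0x808 (TPR) maps to register offset 0x80 and MSR 0x830 (ICR) to
 * offset 0x300.
 */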
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;

	vcpu->arch.pv_eoi.msr_val = data;

	return r == -EBUSY ? 0 : r;
	WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);

		if (kvm_vcpu_is_bsp(vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;

	if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {

		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;