KVM
Classes | Macros | Enumerations | Functions | Variables
lapic.h File Reference
#include <kvm/iodev.h>
#include <linux/kvm_host.h>
#include "hyperv.h"
#include "smm.h"

Go to the source code of this file.

Classes

struct  kvm_timer
 
struct  kvm_lapic
 

Macros

#define KVM_APIC_INIT   0
 
#define KVM_APIC_SIPI   1
 
#define APIC_SHORT_MASK   0xc0000
 
#define APIC_DEST_NOSHORT   0x0
 
#define APIC_DEST_MASK   0x800
 
#define APIC_BUS_CYCLE_NS   1
 
#define APIC_BUS_FREQUENCY   (1000000000ULL / APIC_BUS_CYCLE_NS)
 
#define APIC_BROADCAST   0xFF
 
#define X2APIC_BROADCAST   0xFFFFFFFFul
 
#define APIC_LVTx(x)   ((x) == LVT_CMCI ? APIC_LVTCMCI : APIC_LVTT + 0x10 * (x))
 
#define VEC_POS(v)   ((v) & (32 - 1))
 
#define REG_POS(v)   (((v) >> 5) << 4)
 

Enumerations

enum  lapic_mode { LAPIC_MODE_DISABLED = 0 , LAPIC_MODE_INVALID = X2APIC_ENABLE , LAPIC_MODE_XAPIC = MSR_IA32_APICBASE_ENABLE , LAPIC_MODE_X2APIC = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE }
 
enum  lapic_lvt_entry {
  LVT_TIMER , LVT_THERMAL_MONITOR , LVT_PERFORMANCE_COUNTER , LVT_LINT0 ,
  LVT_LINT1 , LVT_ERROR , LVT_CMCI , KVM_APIC_MAX_NR_LVT_ENTRIES
}
 

Functions

int kvm_create_lapic (struct kvm_vcpu *vcpu, int timer_advance_ns)
 
void kvm_free_lapic (struct kvm_vcpu *vcpu)
 
int kvm_apic_has_interrupt (struct kvm_vcpu *vcpu)
 
int kvm_apic_accept_pic_intr (struct kvm_vcpu *vcpu)
 
int kvm_get_apic_interrupt (struct kvm_vcpu *vcpu)
 
int kvm_apic_accept_events (struct kvm_vcpu *vcpu)
 
void kvm_lapic_reset (struct kvm_vcpu *vcpu, bool init_event)
 
u64 kvm_lapic_get_cr8 (struct kvm_vcpu *vcpu)
 
void kvm_lapic_set_tpr (struct kvm_vcpu *vcpu, unsigned long cr8)
 
void kvm_lapic_set_eoi (struct kvm_vcpu *vcpu)
 
void kvm_lapic_set_base (struct kvm_vcpu *vcpu, u64 value)
 
u64 kvm_lapic_get_base (struct kvm_vcpu *vcpu)
 
void kvm_recalculate_apic_map (struct kvm *kvm)
 
void kvm_apic_set_version (struct kvm_vcpu *vcpu)
 
void kvm_apic_after_set_mcg_cap (struct kvm_vcpu *vcpu)
 
bool kvm_apic_match_dest (struct kvm_vcpu *vcpu, struct kvm_lapic *source, int shorthand, unsigned int dest, int dest_mode)
 
int kvm_apic_compare_prio (struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
 
void kvm_apic_clear_irr (struct kvm_vcpu *vcpu, int vec)
 
bool __kvm_apic_update_irr (u32 *pir, void *regs, int *max_irr)
 
bool kvm_apic_update_irr (struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
 
void kvm_apic_update_ppr (struct kvm_vcpu *vcpu)
 
int kvm_apic_set_irq (struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, struct dest_map *dest_map)
 
int kvm_apic_local_deliver (struct kvm_lapic *apic, int lvt_type)
 
void kvm_apic_update_apicv (struct kvm_vcpu *vcpu)
 
int kvm_alloc_apic_access_page (struct kvm *kvm)
 
void kvm_inhibit_apic_access_page (struct kvm_vcpu *vcpu)
 
bool kvm_irq_delivery_to_apic_fast (struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
 
void kvm_apic_send_ipi (struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
 
u64 kvm_get_apic_base (struct kvm_vcpu *vcpu)
 
int kvm_set_apic_base (struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
int kvm_apic_get_state (struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 
int kvm_apic_set_state (struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 
enum lapic_mode kvm_get_apic_mode (struct kvm_vcpu *vcpu)
 
int kvm_lapic_find_highest_irr (struct kvm_vcpu *vcpu)
 
u64 kvm_get_lapic_tscdeadline_msr (struct kvm_vcpu *vcpu)
 
void kvm_set_lapic_tscdeadline_msr (struct kvm_vcpu *vcpu, u64 data)
 
void kvm_apic_write_nodecode (struct kvm_vcpu *vcpu, u32 offset)
 
void kvm_apic_set_eoi_accelerated (struct kvm_vcpu *vcpu, int vector)
 
int kvm_lapic_set_vapic_addr (struct kvm_vcpu *vcpu, gpa_t vapic_addr)
 
void kvm_lapic_sync_from_vapic (struct kvm_vcpu *vcpu)
 
void kvm_lapic_sync_to_vapic (struct kvm_vcpu *vcpu)
 
int kvm_x2apic_icr_write (struct kvm_lapic *apic, u64 data)
 
int kvm_x2apic_msr_write (struct kvm_vcpu *vcpu, u32 msr, u64 data)
 
int kvm_x2apic_msr_read (struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 
int kvm_hv_vapic_msr_write (struct kvm_vcpu *vcpu, u32 msr, u64 data)
 
int kvm_hv_vapic_msr_read (struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 
int kvm_lapic_set_pv_eoi (struct kvm_vcpu *vcpu, u64 data, unsigned long len)
 
void kvm_lapic_exit (void)
 
u64 kvm_lapic_readable_reg_mask (struct kvm_lapic *apic)
 
static void kvm_lapic_clear_vector (int vec, void *bitmap)
 
static void kvm_lapic_set_vector (int vec, void *bitmap)
 
static void kvm_lapic_set_irr (int vec, struct kvm_lapic *apic)
 
static u32 __kvm_lapic_get_reg (char *regs, int reg_off)
 
static u32 kvm_lapic_get_reg (struct kvm_lapic *apic, int reg_off)
 
 DECLARE_STATIC_KEY_FALSE (kvm_has_noapic_vcpu)
 
static bool lapic_in_kernel (struct kvm_vcpu *vcpu)
 
static bool kvm_apic_hw_enabled (struct kvm_lapic *apic)
 
static bool kvm_apic_sw_enabled (struct kvm_lapic *apic)
 
static bool kvm_apic_present (struct kvm_vcpu *vcpu)
 
static int kvm_lapic_enabled (struct kvm_vcpu *vcpu)
 
static int apic_x2apic_mode (struct kvm_lapic *apic)
 
static bool kvm_vcpu_apicv_active (struct kvm_vcpu *vcpu)
 
static bool kvm_apic_has_pending_init_or_sipi (struct kvm_vcpu *vcpu)
 
static bool kvm_apic_init_sipi_allowed (struct kvm_vcpu *vcpu)
 
static bool kvm_lowest_prio_delivery (struct kvm_lapic_irq *irq)
 
static int kvm_lapic_latched_init (struct kvm_vcpu *vcpu)
 
bool kvm_apic_pending_eoi (struct kvm_vcpu *vcpu, int vector)
 
void kvm_wait_lapic_expire (struct kvm_vcpu *vcpu)
 
void kvm_bitmap_or_dest_vcpus (struct kvm *kvm, struct kvm_lapic_irq *irq, unsigned long *vcpu_bitmap)
 
bool kvm_intr_is_single_vcpu_fast (struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu)
 
int kvm_vector_to_index (u32 vector, u32 dest_vcpus, const unsigned long *bitmap, u32 bitmap_size)
 
void kvm_lapic_switch_to_sw_timer (struct kvm_vcpu *vcpu)
 
void kvm_lapic_switch_to_hv_timer (struct kvm_vcpu *vcpu)
 
void kvm_lapic_expired_hv_timer (struct kvm_vcpu *vcpu)
 
bool kvm_lapic_hv_timer_in_use (struct kvm_vcpu *vcpu)
 
void kvm_lapic_restart_hv_timer (struct kvm_vcpu *vcpu)
 
bool kvm_can_use_hv_timer (struct kvm_vcpu *vcpu)
 
static enum lapic_mode kvm_apic_mode (u64 apic_base)
 
static u8 kvm_xapic_id (struct kvm_lapic *apic)
 

Variables

struct static_key_false_deferred apic_hw_disabled
 
struct static_key_false_deferred apic_sw_disabled
 

Macro Definition Documentation

◆ APIC_BROADCAST

#define APIC_BROADCAST   0xFF

Definition at line 22 of file lapic.h.

◆ APIC_BUS_CYCLE_NS

#define APIC_BUS_CYCLE_NS   1

Definition at line 19 of file lapic.h.

◆ APIC_BUS_FREQUENCY

#define APIC_BUS_FREQUENCY   (1000000000ULL / APIC_BUS_CYCLE_NS)

Definition at line 20 of file lapic.h.
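
Together with APIC_BUS_CYCLE_NS, this models the emulated APIC timer bus as one cycle per nanosecond, i.e. a 1 GHz bus. A minimal standalone sketch of the arithmetic (the two macros are copied from this header; the period calculation mirrors how the timer period is derived from the initial count and the divide configuration):

#include <stdio.h>

/* Copied from lapic.h: one bus cycle per nanosecond -> 1 GHz emulated bus. */
#define APIC_BUS_CYCLE_NS   1
#define APIC_BUS_FREQUENCY  (1000000000ULL / APIC_BUS_CYCLE_NS)

int main(void)
{
    /* Hypothetical APIC_TMICT value of 1000000 with divide-by-1. */
    unsigned long long tmict = 1000000, divide_count = 1;
    unsigned long long period_ns = tmict * APIC_BUS_CYCLE_NS * divide_count;

    printf("bus frequency: %llu Hz\n", APIC_BUS_FREQUENCY); /* 1000000000 */
    printf("timer period : %llu ns\n", period_ns);          /* 1000000, i.e. 1 ms */
    return 0;
}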

◆ APIC_DEST_MASK

#define APIC_DEST_MASK   0x800

Definition at line 17 of file lapic.h.

◆ APIC_DEST_NOSHORT

#define APIC_DEST_NOSHORT   0x0

Definition at line 16 of file lapic.h.

◆ APIC_LVTx

#define APIC_LVTx(x)   ((x) == LVT_CMCI ? APIC_LVTCMCI : APIC_LVTT + 0x10 * (x))

Definition at line 44 of file lapic.h.
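
APIC_LVTx() maps a lapic_lvt_entry index to the MMIO offset of its LVT register: LVT_TIMER through LVT_ERROR sit contiguously from APIC_LVTT in 0x10 steps, while LVT_CMCI lives at the separate APIC_LVTCMCI offset. A minimal standalone sketch, assuming the xAPIC register offsets from asm/apicdef.h:

#include <stdio.h>

/* Assumed xAPIC register offsets (arch/x86/include/asm/apicdef.h). */
#define APIC_LVTCMCI 0x2f0
#define APIC_LVTT    0x320

/* Indices mirror enum lapic_lvt_entry from this header. */
enum { LVT_TIMER, LVT_THERMAL_MONITOR, LVT_PERFORMANCE_COUNTER,
       LVT_LINT0, LVT_LINT1, LVT_ERROR, LVT_CMCI };

/* Copied from lapic.h. */
#define APIC_LVTx(x) ((x) == LVT_CMCI ? APIC_LVTCMCI : APIC_LVTT + 0x10 * (x))

int main(void)
{
    printf("LVT_TIMER -> %#x\n", APIC_LVTx(LVT_TIMER)); /* 0x320, APIC_LVTT    */
    printf("LVT_ERROR -> %#x\n", APIC_LVTx(LVT_ERROR)); /* 0x370, APIC_LVTERR  */
    printf("LVT_CMCI  -> %#x\n", APIC_LVTx(LVT_CMCI));  /* 0x2f0, APIC_LVTCMCI */
    return 0;
}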

◆ APIC_SHORT_MASK

#define APIC_SHORT_MASK   0xc0000

Definition at line 15 of file lapic.h.
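
APIC_SHORT_MASK and APIC_DEST_MASK (documented above) pick the destination-shorthand and destination-mode fields out of the low ICR word; kvm_apic_send_ipi() below uses exactly these masks when it splits an ICR write into a kvm_lapic_irq. A small standalone sketch with a hypothetical ICR value:

#include <stdio.h>

/* ICR field masks copied from lapic.h. */
#define APIC_SHORT_MASK    0xc0000 /* destination shorthand, ICR bits 19:18      */
#define APIC_DEST_MASK     0x800   /* destination mode, ICR bit 11 (1 = logical) */
#define APIC_DEST_NOSHORT  0x0

int main(void)
{
    /* Hypothetical ICR low word: fixed delivery, logical destination, vector 0x40. */
    unsigned int icr_low = 0x840;

    printf("no shorthand: %d\n", (icr_low & APIC_SHORT_MASK) == APIC_DEST_NOSHORT); /* 1 */
    printf("logical dest: %d\n", (icr_low & APIC_DEST_MASK) != 0);                  /* 1 */
    return 0;
}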

◆ KVM_APIC_INIT

#define KVM_APIC_INIT   0

Definition at line 12 of file lapic.h.

◆ KVM_APIC_SIPI

#define KVM_APIC_SIPI   1

Definition at line 13 of file lapic.h.

◆ REG_POS

#define REG_POS(v)   (((v) >> 5) << 4)

Definition at line 152 of file lapic.h.

◆ VEC_POS

#define VEC_POS(v)   ((v) & (32 - 1))

Definition at line 151 of file lapic.h.
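
The 256 vectors of the IRR, ISR and TMR are spread over eight 32-bit registers spaced 0x10 bytes apart; REG_POS() yields the byte offset of the register bank holding a vector and VEC_POS() the bit inside it, which is how kvm_lapic_set_vector() and kvm_lapic_clear_vector() below address the bitmap. A standalone sketch:

#include <stdio.h>

/* Copied from lapic.h: bank offset and bit position of a vector. */
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)

int main(void)
{
    int vec = 0x31; /* vector 49 */

    /* 32 vectors per bank, banks 0x10 bytes apart. */
    printf("vector %#x: bank offset +%#x, bit %d\n",
           vec, REG_POS(vec), VEC_POS(vec)); /* +0x10, bit 17 */
    return 0;
}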

◆ X2APIC_BROADCAST

#define X2APIC_BROADCAST   0xFFFFFFFFul

Definition at line 23 of file lapic.h.

Enumeration Type Documentation

◆ lapic_lvt_entry

Enumerator
LVT_TIMER 
LVT_THERMAL_MONITOR 
LVT_PERFORMANCE_COUNTER 
LVT_LINT0 
LVT_LINT1 
LVT_ERROR 
LVT_CMCI 
KVM_APIC_MAX_NR_LVT_ENTRIES 

Definition at line 32 of file lapic.h.

32  {
33  LVT_TIMER,
34  LVT_THERMAL_MONITOR,
35  LVT_PERFORMANCE_COUNTER,
36  LVT_LINT0,
37  LVT_LINT1,
38  LVT_ERROR,
39  LVT_CMCI,
40 
41  KVM_APIC_MAX_NR_LVT_ENTRIES,
42 };

◆ lapic_mode

enum lapic_mode
Enumerator
LAPIC_MODE_DISABLED 
LAPIC_MODE_INVALID 
LAPIC_MODE_XAPIC 
LAPIC_MODE_X2APIC 

Definition at line 25 of file lapic.h.

25  {
26  LAPIC_MODE_DISABLED = 0,
27  LAPIC_MODE_INVALID = X2APIC_ENABLE,
28  LAPIC_MODE_XAPIC = MSR_IA32_APICBASE_ENABLE,
29  LAPIC_MODE_X2APIC = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE,
30 };

Function Documentation

◆ __kvm_apic_update_irr()

bool __kvm_apic_update_irr ( u32 *  pir,
void *  regs,
int *  max_irr 
)

Definition at line 654 of file lapic.c.

655 {
656  u32 i, vec;
657  u32 pir_val, irr_val, prev_irr_val;
658  int max_updated_irr;
659 
660  max_updated_irr = -1;
661  *max_irr = -1;
662 
663  for (i = vec = 0; i <= 7; i++, vec += 32) {
664  u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);
665 
666  irr_val = *p_irr;
667  pir_val = READ_ONCE(pir[i]);
668 
669  if (pir_val) {
670  pir_val = xchg(&pir[i], 0);
671 
672  prev_irr_val = irr_val;
673  do {
674  irr_val = prev_irr_val | pir_val;
675  } while (prev_irr_val != irr_val &&
676  !try_cmpxchg(p_irr, &prev_irr_val, irr_val));
677 
678  if (prev_irr_val != irr_val)
679  max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
680  }
681  if (irr_val)
682  *max_irr = __fls(irr_val) + vec;
683  }
684 
685  return ((max_updated_irr != -1) &&
686  (max_updated_irr == *max_irr));
687 }

◆ __kvm_lapic_get_reg()

static u32 __kvm_lapic_get_reg ( char *  regs,
int  reg_off 
)
inlinestatic

Definition at line 174 of file lapic.h.

175 {
176  return *((u32 *) (regs + reg_off));
177 }

◆ apic_x2apic_mode()

static int apic_x2apic_mode ( struct kvm_lapic apic)
inlinestatic

Definition at line 221 of file lapic.h.

222 {
223  return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
224 }

◆ DECLARE_STATIC_KEY_FALSE()

DECLARE_STATIC_KEY_FALSE ( kvm_has_noapic_vcpu  )

◆ kvm_alloc_apic_access_page()

int kvm_alloc_apic_access_page ( struct kvm *  kvm)

Definition at line 2598 of file lapic.c.

2599 {
2600  struct page *page;
2601  void __user *hva;
2602  int ret = 0;
2603 
2604  mutex_lock(&kvm->slots_lock);
2605  if (kvm->arch.apic_access_memslot_enabled ||
2606  kvm->arch.apic_access_memslot_inhibited)
2607  goto out;
2608 
2609  hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2610  APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2611  if (IS_ERR(hva)) {
2612  ret = PTR_ERR(hva);
2613  goto out;
2614  }
2615 
2616  page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
2617  if (is_error_page(page)) {
2618  ret = -EFAULT;
2619  goto out;
2620  }
2621 
2622  /*
2623  * Do not pin the page in memory, so that memory hot-unplug
2624  * is able to migrate it.
2625  */
2626  put_page(page);
2627  kvm->arch.apic_access_memslot_enabled = true;
2628 out:
2629  mutex_unlock(&kvm->slots_lock);
2630  return ret;
2631 }

◆ kvm_apic_accept_events()

int kvm_apic_accept_events ( struct kvm_vcpu *  vcpu)

Definition at line 3263 of file lapic.c.

3264 {
3265  struct kvm_lapic *apic = vcpu->arch.apic;
3266  u8 sipi_vector;
3267  int r;
3268 
3269  if (!kvm_apic_has_pending_init_or_sipi(vcpu))
3270  return 0;
3271 
3272  if (is_guest_mode(vcpu)) {
3273  r = kvm_check_nested_events(vcpu);
3274  if (r < 0)
3275  return r == -EBUSY ? 0 : r;
3276  /*
3277  * Continue processing INIT/SIPI even if a nested VM-Exit
3278  * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
3279  * are blocked as a result of transitioning to VMX root mode.
3280  */
3281  }
3282 
3283  /*
3284  * INITs are blocked while CPU is in specific states (SMM, VMX root
3285  * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
3286  * wait-for-SIPI (WFS).
3287  */
3288  if (!kvm_apic_init_sipi_allowed(vcpu)) {
3289  WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3290  clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3291  return 0;
3292  }
3293 
3294  if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3295  kvm_vcpu_reset(vcpu, true);
3296  if (kvm_vcpu_is_bsp(apic->vcpu))
3297  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3298  else
3299  vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3300  }
3301  if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3302  if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3303  /* evaluate pending_events before reading the vector */
3304  smp_rmb();
3305  sipi_vector = apic->sipi_vector;
3306  static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
3307  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3308  }
3309  }
3310  return 0;
3311 }

◆ kvm_apic_accept_pic_intr()

int kvm_apic_accept_pic_intr ( struct kvm_vcpu *  vcpu)

Definition at line 2872 of file lapic.c.

2873 {
2874  u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2875 
2876  if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2877  return 1;
2878  if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2879  GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2880  return 1;
2881  return 0;
2882 }

◆ kvm_apic_after_set_mcg_cap()

void kvm_apic_after_set_mcg_cap ( struct kvm_vcpu *  vcpu)

Definition at line 596 of file lapic.c.

597 {
598  int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
599  struct kvm_lapic *apic = vcpu->arch.apic;
600  int i;
601 
602  if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
603  return;
604 
605  /* Initialize/mask any "new" LVT entries. */
606  for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
607  kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
608 
609  apic->nr_lvt_entries = nr_lvt_entries;
610 
611  /* The number of LVT entries is reflected in the version register. */
612  kvm_apic_set_version(vcpu);
613 }

◆ kvm_apic_clear_irr()

void kvm_apic_clear_irr ( struct kvm_vcpu *  vcpu,
int  vec 
)

Definition at line 738 of file lapic.c.

739 {
740  apic_clear_irr(vec, vcpu->arch.apic);
741 }

◆ kvm_apic_compare_prio()

int kvm_apic_compare_prio ( struct kvm_vcpu *  vcpu1,
struct kvm_vcpu *  vcpu2 
)

Definition at line 1432 of file lapic.c.

1433 {
1434  return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1435 }

◆ kvm_apic_get_state()

int kvm_apic_get_state ( struct kvm_vcpu *  vcpu,
struct kvm_lapic_state *  s 
)

Definition at line 2970 of file lapic.c.

2971 {
2972  memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2973 
2974  /*
2975  * Get calculated timer current count for remaining timer period (if
2976  * any) and store it in the returned register set.
2977  */
2978  __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2979  __apic_read(vcpu->arch.apic, APIC_TMCCT));
2980 
2981  return kvm_apic_state_fixup(vcpu, s, false);
2982 }

◆ kvm_apic_has_interrupt()

int kvm_apic_has_interrupt ( struct kvm_vcpu *  vcpu)

Definition at line 2859 of file lapic.c.

2860 {
2861  struct kvm_lapic *apic = vcpu->arch.apic;
2862  u32 ppr;
2863 
2864  if (!kvm_apic_present(vcpu))
2865  return -1;
2866 
2867  __apic_update_ppr(apic, &ppr);
2868  return apic_has_interrupt_for_ppr(apic, ppr);
2869 }

◆ kvm_apic_has_pending_init_or_sipi()

static bool kvm_apic_has_pending_init_or_sipi ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 231 of file lapic.h.

232 {
233  return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
234 }

◆ kvm_apic_hw_enabled()

static bool kvm_apic_hw_enabled ( struct kvm_lapic apic)
inlinestatic

Definition at line 195 of file lapic.h.

196 {
197  if (static_branch_unlikely(&apic_hw_disabled.key))
198  return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
199  return true;
200 }

◆ kvm_apic_init_sipi_allowed()

static bool kvm_apic_init_sipi_allowed ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 236 of file lapic.h.

237 {
238  return !is_smm(vcpu) &&
239  !static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
240 }

◆ kvm_apic_local_deliver()

int kvm_apic_local_deliver ( struct kvm_lapic apic,
int  lvt_type 
)

Definition at line 2762 of file lapic.c.

2763 {
2764  u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2765  int vector, mode, trig_mode;
2766  int r;
2767 
2768  if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2769  vector = reg & APIC_VECTOR_MASK;
2770  mode = reg & APIC_MODE_MASK;
2771  trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2772 
2773  r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2774  if (r && lvt_type == APIC_LVTPC &&
2775  guest_cpuid_is_intel_compatible(apic->vcpu))
2776  kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2777  return r;
2778  }
2779  return 0;
2780 }

◆ kvm_apic_match_dest()

bool kvm_apic_match_dest ( struct kvm_vcpu *  vcpu,
struct kvm_lapic source,
int  shorthand,
unsigned int  dest,
int  dest_mode 
)

Definition at line 1067 of file lapic.c.

1069 {
1070  struct kvm_lapic *target = vcpu->arch.apic;
1071  u32 mda = kvm_apic_mda(vcpu, dest, source, target);
1072 
1073  ASSERT(target);
1074  switch (shorthand) {
1075  case APIC_DEST_NOSHORT:
1076  if (dest_mode == APIC_DEST_PHYSICAL)
1077  return kvm_apic_match_physical_addr(target, mda);
1078  else
1079  return kvm_apic_match_logical_addr(target, mda);
1080  case APIC_DEST_SELF:
1081  return target == source;
1082  case APIC_DEST_ALLINC:
1083  return true;
1084  case APIC_DEST_ALLBUT:
1085  return target != source;
1086  default:
1087  return false;
1088  }
1089 }

◆ kvm_apic_mode()

static enum lapic_mode kvm_apic_mode ( u64  apic_base)
inlinestatic

Definition at line 271 of file lapic.h.

272 {
273  return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
274 }
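
kvm_apic_mode() classifies the APIC base MSR purely by its two enable bits, yielding one of the lapic_mode values documented above. A standalone sketch of the same masking, assuming the usual MSR bit positions (bit 11 = global xAPIC enable, bit 10 = x2APIC enable):

#include <stdio.h>

/* Assumed MSR_IA32_APICBASE bits: 11 = xAPIC global enable, 10 = x2APIC enable. */
#define MSR_IA32_APICBASE_ENABLE (1ULL << 11)
#define X2APIC_ENABLE            (1ULL << 10)

enum lapic_mode {
    LAPIC_MODE_DISABLED = 0,
    LAPIC_MODE_INVALID  = X2APIC_ENABLE,
    LAPIC_MODE_XAPIC    = MSR_IA32_APICBASE_ENABLE,
    LAPIC_MODE_X2APIC   = MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE,
};

/* Same masking as kvm_apic_mode(): only the two enable bits matter. */
static enum lapic_mode apic_mode(unsigned long long apic_base)
{
    return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
}

int main(void)
{
    /* Typical post-reset value: default base 0xfee00000 with the xAPIC enabled. */
    printf("%d\n", apic_mode(0xfee00000ULL | MSR_IA32_APICBASE_ENABLE) == LAPIC_MODE_XAPIC); /* 1 */
    /* x2APIC enable without the global enable bit is the invalid combination. */
    printf("%d\n", apic_mode(X2APIC_ENABLE) == LAPIC_MODE_INVALID);                          /* 1 */
    return 0;
}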

◆ kvm_apic_pending_eoi()

bool kvm_apic_pending_eoi ( struct kvm_vcpu *  vcpu,
int  vector 
)

Definition at line 110 of file lapic.c.

111 {
112  struct kvm_lapic *apic = vcpu->arch.apic;
113 
114  return apic_test_vector(vector, apic->regs + APIC_ISR) ||
115  apic_test_vector(vector, apic->regs + APIC_IRR);
116 }

◆ kvm_apic_present()

static bool kvm_apic_present ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 211 of file lapic.h.

212 {
213  return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
214 }

◆ kvm_apic_send_ipi()

void kvm_apic_send_ipi ( struct kvm_lapic apic,
u32  icr_low,
u32  icr_high 
)

Definition at line 1504 of file lapic.c.

1505 {
1506  struct kvm_lapic_irq irq;
1507 
1508  /* KVM has no delay and should always clear the BUSY/PENDING flag. */
1509  WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1510 
1511  irq.vector = icr_low & APIC_VECTOR_MASK;
1512  irq.delivery_mode = icr_low & APIC_MODE_MASK;
1513  irq.dest_mode = icr_low & APIC_DEST_MASK;
1514  irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1515  irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1516  irq.shorthand = icr_low & APIC_SHORT_MASK;
1517  irq.msi_redir_hint = false;
1518  if (apic_x2apic_mode(apic))
1519  irq.dest_id = icr_high;
1520  else
1521  irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1522 
1523  trace_kvm_apic_ipi(icr_low, irq.dest_id);
1524 
1525  kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1526 }

◆ kvm_apic_set_eoi_accelerated()

void kvm_apic_set_eoi_accelerated ( struct kvm_vcpu *  vcpu,
int  vector 
)

Definition at line 1493 of file lapic.c.

1494 {
1495  struct kvm_lapic *apic = vcpu->arch.apic;
1496 
1497  trace_kvm_eoi(apic, vector);
1498 
1499  kvm_ioapic_send_eoi(apic, vector);
1500  kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1501 }

◆ kvm_apic_set_irq()

int kvm_apic_set_irq ( struct kvm_vcpu *  vcpu,
struct kvm_lapic_irq *  irq,
struct dest_map dest_map 
)

Definition at line 823 of file lapic.c.

825 {
826  struct kvm_lapic *apic = vcpu->arch.apic;
827 
828  return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
829  irq->level, irq->trig_mode, dest_map);
830 }

◆ kvm_apic_set_state()

int kvm_apic_set_state ( struct kvm_vcpu *  vcpu,
struct kvm_lapic_state *  s 
)

Definition at line 2984 of file lapic.c.

2985 {
2986  struct kvm_lapic *apic = vcpu->arch.apic;
2987  int r;
2988 
2989  static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
2990 
2991  kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2992  /* set SPIV separately to get count of SW disabled APICs right */
2993  apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2994 
2995  r = kvm_apic_state_fixup(vcpu, s, true);
2996  if (r) {
2997  kvm_recalculate_apic_map(vcpu->kvm);
2998  return r;
2999  }
3000  memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3001 
3002  atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3003  kvm_recalculate_apic_map(vcpu->kvm);
3004  kvm_apic_set_version(vcpu);
3005 
3006  apic_update_ppr(apic);
3007  cancel_apic_timer(apic);
3008  apic->lapic_timer.expired_tscdeadline = 0;
3009  apic_update_lvtt(apic);
3010  apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3011  update_divide_count(apic);
3012  __start_apic_timer(apic, APIC_TMCCT);
3013  kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3014  kvm_apic_update_apicv(vcpu);
3015  if (apic->apicv_active) {
3016  static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
3017  static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
3018  static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
3019  }
3020  kvm_make_request(KVM_REQ_EVENT, vcpu);
3021  if (ioapic_in_kernel(vcpu->kvm))
3022  kvm_rtc_eoi_tracking_restore_one(vcpu);
3023 
3024  vcpu->arch.apic_arb_prio = 0;
3025 
3026  return 0;
3027 }

◆ kvm_apic_set_version()

void kvm_apic_set_version ( struct kvm_vcpu *  vcpu)

Definition at line 573 of file lapic.c.

574 {
575  struct kvm_lapic *apic = vcpu->arch.apic;
576  u32 v = 0;
577 
578  if (!lapic_in_kernel(vcpu))
579  return;
580 
581  v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
582 
583  /*
584  * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation)
585  * which doesn't have EOI register; Some buggy OSes (e.g. Windows with
586  * Hyper-V role) disable EOI broadcast in lapic not checking for IOAPIC
587  * version first and level-triggered interrupts never get EOIed in
588  * IOAPIC.
589  */
590  if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
591  !ioapic_in_kernel(vcpu->kvm))
592  v |= APIC_LVR_DIRECTED_EOI;
593  kvm_lapic_set_reg(apic, APIC_LVR, v);
594 }

◆ kvm_apic_sw_enabled()

static bool kvm_apic_sw_enabled ( struct kvm_lapic apic)
inlinestatic

Definition at line 204 of file lapic.h.

205 {
206  if (static_branch_unlikely(&apic_sw_disabled.key))
207  return apic->sw_enabled;
208  return true;
209 }

◆ kvm_apic_update_apicv()

void kvm_apic_update_apicv ( struct kvm_vcpu *  vcpu)

Definition at line 2578 of file lapic.c.

2579 {
2580  struct kvm_lapic *apic = vcpu->arch.apic;
2581 
2582  if (apic->apicv_active) {
2583  /* irr_pending is always true when apicv is activated. */
2584  apic->irr_pending = true;
2585  apic->isr_count = 1;
2586  } else {
2587  /*
2588  * Don't clear irr_pending, searching the IRR can race with
2589  * updates from the CPU as APICv is still active from hardware's
2590  * perspective. The flag will be cleared as appropriate when
2591  * KVM injects the interrupt.
2592  */
2593  apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2594  }
2595  apic->highest_isr_cache = -1;
2596 }

◆ kvm_apic_update_irr()

bool kvm_apic_update_irr ( struct kvm_vcpu *  vcpu,
u32 *  pir,
int *  max_irr 
)

Definition at line 690 of file lapic.c.

691 {
692  struct kvm_lapic *apic = vcpu->arch.apic;
693  bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
694 
695  if (unlikely(!apic->apicv_active && irr_updated))
696  apic->irr_pending = true;
697  return irr_updated;
698 }

◆ kvm_apic_update_ppr()

void kvm_apic_update_ppr ( struct kvm_vcpu *  vcpu)

Definition at line 975 of file lapic.c.

976 {
977  apic_update_ppr(vcpu->arch.apic);
978 }

◆ kvm_apic_write_nodecode()

void kvm_apic_write_nodecode ( struct kvm_vcpu *  vcpu,
u32  offset 
)

Definition at line 2446 of file lapic.c.

2447 {
2448  struct kvm_lapic *apic = vcpu->arch.apic;
2449 
2450  /*
2451  * ICR is a single 64-bit register when x2APIC is enabled, all others
2452  * registers hold 32-bit values. For legacy xAPIC, ICR writes need to
2453  * go down the common path to get the upper half from ICR2.
2454  *
2455  * Note, using the write helpers may incur an unnecessary write to the
2456  * virtual APIC state, but KVM needs to conditionally modify the value
2457  * in certain cases, e.g. to clear the ICR busy bit. The cost of extra
2458  * conditional branches is likely a wash relative to the cost of the
2459  * maybe-unecessary write, and both are in the noise anyways.
2460  */
2461  if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2462  kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
2463  else
2464  kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2465 }

◆ kvm_bitmap_or_dest_vcpus()

void kvm_bitmap_or_dest_vcpus ( struct kvm *  kvm,
struct kvm_lapic_irq *  irq,
unsigned long *  vcpu_bitmap 
)

Definition at line 1394 of file lapic.c.

1396 {
1397  struct kvm_lapic **dest_vcpu = NULL;
1398  struct kvm_lapic *src = NULL;
1399  struct kvm_apic_map *map;
1400  struct kvm_vcpu *vcpu;
1401  unsigned long bitmap, i;
1402  int vcpu_idx;
1403  bool ret;
1404 
1405  rcu_read_lock();
1406  map = rcu_dereference(kvm->arch.apic_map);
1407 
1408  ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
1409  &bitmap);
1410  if (ret) {
1411  for_each_set_bit(i, &bitmap, 16) {
1412  if (!dest_vcpu[i])
1413  continue;
1414  vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
1415  __set_bit(vcpu_idx, vcpu_bitmap);
1416  }
1417  } else {
1418  kvm_for_each_vcpu(i, vcpu, kvm) {
1419  if (!kvm_apic_present(vcpu))
1420  continue;
1421  if (!kvm_apic_match_dest(vcpu, NULL,
1422  irq->shorthand,
1423  irq->dest_id,
1424  irq->dest_mode))
1425  continue;
1426  __set_bit(i, vcpu_bitmap);
1427  }
1428  }
1429  rcu_read_unlock();
1430 }

◆ kvm_can_use_hv_timer()

bool kvm_can_use_hv_timer ( struct kvm_vcpu *  vcpu)

Definition at line 154 of file lapic.c.

155 {
156  return kvm_x86_ops.set_hv_timer
157  && !(kvm_mwait_in_guest(vcpu->kvm) ||
158  kvm_can_post_timer_interrupt(vcpu));
159 }

◆ kvm_create_lapic()

int kvm_create_lapic ( struct kvm_vcpu *  vcpu,
int  timer_advance_ns 
)

Definition at line 2810 of file lapic.c.

2811 {
2812  struct kvm_lapic *apic;
2813 
2814  ASSERT(vcpu != NULL);
2815 
2816  apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2817  if (!apic)
2818  goto nomem;
2819 
2820  vcpu->arch.apic = apic;
2821 
2822  apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2823  if (!apic->regs) {
2824  printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2825  vcpu->vcpu_id);
2826  goto nomem_free_apic;
2827  }
2828  apic->vcpu = vcpu;
2829 
2830  apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2831 
2832  hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2833  HRTIMER_MODE_ABS_HARD);
2834  apic->lapic_timer.timer.function = apic_timer_fn;
2835  if (timer_advance_ns == -1) {
2836  apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2837  lapic_timer_advance_dynamic = true;
2838  } else {
2839  apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2840  lapic_timer_advance_dynamic = false;
2841  }
2842 
2843  /*
2844  * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2845  * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2846  */
2847  vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2848  static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2849  kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2850 
2851  return 0;
2852 nomem_free_apic:
2853  kfree(apic);
2854  vcpu->arch.apic = NULL;
2855 nomem:
2856  return -ENOMEM;
2857 }

◆ kvm_free_lapic()

void kvm_free_lapic ( struct kvm_vcpu *  vcpu)

Definition at line 2468 of file lapic.c.

2469 {
2470  struct kvm_lapic *apic = vcpu->arch.apic;
2471 
2472  if (!vcpu->arch.apic)
2473  return;
2474 
2475  hrtimer_cancel(&apic->lapic_timer.timer);
2476 
2477  if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2478  static_branch_slow_dec_deferred(&apic_hw_disabled);
2479 
2480  if (!apic->sw_enabled)
2481  static_branch_slow_dec_deferred(&apic_sw_disabled);
2482 
2483  if (apic->regs)
2484  free_page((unsigned long)apic->regs);
2485 
2486  kfree(apic);
2487 }

◆ kvm_get_apic_base()

u64 kvm_get_apic_base ( struct kvm_vcpu *  vcpu)

Definition at line 474 of file x86.c.

475 {
476  return vcpu->arch.apic_base;
477 }

◆ kvm_get_apic_interrupt()

int kvm_get_apic_interrupt ( struct kvm_vcpu *  vcpu)

Definition at line 2894 of file lapic.c.

2895 {
2896  int vector = kvm_apic_has_interrupt(vcpu);
2897  struct kvm_lapic *apic = vcpu->arch.apic;
2898  u32 ppr;
2899 
2900  if (vector == -1)
2901  return -1;
2902 
2903  /*
2904  * We get here even with APIC virtualization enabled, if doing
2905  * nested virtualization and L1 runs with the "acknowledge interrupt
2906  * on exit" mode. Then we cannot inject the interrupt via RVI,
2907  * because the process would deliver it through the IDT.
2908  */
2909 
2910  apic_clear_irr(vector, apic);
2911  if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
2912  /*
2913  * For auto-EOI interrupts, there might be another pending
2914  * interrupt above PPR, so check whether to raise another
2915  * KVM_REQ_EVENT.
2916  */
2917  apic_update_ppr(apic);
2918  } else {
2919  /*
2920  * For normal interrupts, PPR has been raised and there cannot
2921  * be a higher-priority pending interrupt---except if there was
2922  * a concurrent interrupt injection, but that would have
2923  * triggered KVM_REQ_EVENT already.
2924  */
2925  apic_set_isr(vector, apic);
2926  __apic_update_ppr(apic, &ppr);
2927  }
2928 
2929  return vector;
2930 }

◆ kvm_get_apic_mode()

enum lapic_mode kvm_get_apic_mode ( struct kvm_vcpu *  vcpu)

Definition at line 479 of file x86.c.

480 {
481  return kvm_apic_mode(kvm_get_apic_base(vcpu));
482 }

◆ kvm_get_lapic_tscdeadline_msr()

u64 kvm_get_lapic_tscdeadline_msr ( struct kvm_vcpu *  vcpu)

Definition at line 2494 of file lapic.c.

2495 {
2496  struct kvm_lapic *apic = vcpu->arch.apic;
2497 
2499  return 0;
2500 
2501  return apic->lapic_timer.tscdeadline;
2502 }

◆ kvm_hv_vapic_msr_read()

int kvm_hv_vapic_msr_read ( struct kvm_vcpu *  vcpu,
u32  msr,
u64 *  data 
)

Definition at line 3229 of file lapic.c.

3230 {
3231  if (!lapic_in_kernel(vcpu))
3232  return 1;
3233 
3234  return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3235 }

◆ kvm_hv_vapic_msr_write()

int kvm_hv_vapic_msr_write ( struct kvm_vcpu *  vcpu,
u32  msr,
u64  data 
)

Definition at line 3221 of file lapic.c.

3222 {
3223  if (!lapic_in_kernel(vcpu))
3224  return 1;
3225 
3226  return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3227 }

◆ kvm_inhibit_apic_access_page()

void kvm_inhibit_apic_access_page ( struct kvm_vcpu *  vcpu)

Definition at line 2634 of file lapic.c.

2635 {
2636  struct kvm *kvm = vcpu->kvm;
2637 
2638  if (!kvm->arch.apic_access_memslot_enabled)
2639  return;
2640 
2641  kvm_vcpu_srcu_read_unlock(vcpu);
2642 
2643  mutex_lock(&kvm->slots_lock);
2644 
2645  if (kvm->arch.apic_access_memslot_enabled) {
2646  __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2647  /*
2648  * Clear "enabled" after the memslot is deleted so that a
2649  * different vCPU doesn't get a false negative when checking
2650  * the flag out of slots_lock. No additional memory barrier is
2651  * needed as modifying memslots requires waiting other vCPUs to
2652  * drop SRCU (see above), and false positives are ok as the
2653  * flag is rechecked after acquiring slots_lock.
2654  */
2655  kvm->arch.apic_access_memslot_enabled = false;
2656 
2657  /*
2658  * Mark the memslot as inhibited to prevent reallocating the
2659  * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2660  */
2661  kvm->arch.apic_access_memslot_inhibited = true;
2662  }
2663 
2664  mutex_unlock(&kvm->slots_lock);
2665 
2666  kvm_vcpu_srcu_read_lock(vcpu);
2667 }

◆ kvm_intr_is_single_vcpu_fast()

bool kvm_intr_is_single_vcpu_fast ( struct kvm *  kvm,
struct kvm_lapic_irq *  irq,
struct kvm_vcpu **  dest_vcpu 
)

Definition at line 1259 of file lapic.c.

1261 {
1262  struct kvm_apic_map *map;
1263  unsigned long bitmap;
1264  struct kvm_lapic **dst = NULL;
1265  bool ret = false;
1266 
1267  if (irq->shorthand)
1268  return false;
1269 
1270  rcu_read_lock();
1271  map = rcu_dereference(kvm->arch.apic_map);
1272 
1273  if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
1274  hweight16(bitmap) == 1) {
1275  unsigned long i = find_first_bit(&bitmap, 16);
1276 
1277  if (dst[i]) {
1278  *dest_vcpu = dst[i]->vcpu;
1279  ret = true;
1280  }
1281  }
1282 
1283  rcu_read_unlock();
1284  return ret;
1285 }

◆ kvm_irq_delivery_to_apic_fast()

bool kvm_irq_delivery_to_apic_fast ( struct kvm *  kvm,
struct kvm_lapic src,
struct kvm_lapic_irq *  irq,
int *  r,
struct dest_map dest_map 
)

Definition at line 1208 of file lapic.c.

1210 {
1211  struct kvm_apic_map *map;
1212  unsigned long bitmap;
1213  struct kvm_lapic **dst = NULL;
1214  int i;
1215  bool ret;
1216 
1217  *r = -1;
1218 
1219  if (irq->shorthand == APIC_DEST_SELF) {
1220  if (KVM_BUG_ON(!src, kvm)) {
1221  *r = 0;
1222  return true;
1223  }
1224  *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
1225  return true;
1226  }
1227 
1228  rcu_read_lock();
1229  map = rcu_dereference(kvm->arch.apic_map);
1230 
1231  ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
1232  if (ret) {
1233  *r = 0;
1234  for_each_set_bit(i, &bitmap, 16) {
1235  if (!dst[i])
1236  continue;
1237  *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
1238  }
1239  }
1240 
1241  rcu_read_unlock();
1242  return ret;
1243 }

◆ kvm_lapic_clear_vector()

static void kvm_lapic_clear_vector ( int  vec,
void *  bitmap 
)
inlinestatic

Definition at line 154 of file lapic.h.

155 {
156  clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
157 }

◆ kvm_lapic_enabled()

static int kvm_lapic_enabled ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 216 of file lapic.h.

217 {
218  return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
219 }

◆ kvm_lapic_exit()

void kvm_lapic_exit ( void  )

Definition at line 3313 of file lapic.c.

3314 {
3315  static_key_deferred_flush(&apic_hw_disabled);
3316  WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
3317  static_key_deferred_flush(&apic_sw_disabled);
3318  WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
3319 }

◆ kvm_lapic_expired_hv_timer()

void kvm_lapic_expired_hv_timer ( struct kvm_vcpu *  vcpu)

Definition at line 2171 of file lapic.c.

2172 {
2173  struct kvm_lapic *apic = vcpu->arch.apic;
2174 
2175  preempt_disable();
2176  /* If the preempt notifier has already run, it also called apic_timer_expired */
2177  if (!apic->lapic_timer.hv_timer_in_use)
2178  goto out;
2179  WARN_ON(kvm_vcpu_is_blocking(vcpu));
2180  apic_timer_expired(apic, false);
2181  cancel_hv_timer(apic);
2182 
2183  if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2184  advance_periodic_target_expiration(apic);
2185  restart_apic_timer(apic);
2186  }
2187 out:
2188  preempt_enable();
2189 }

◆ kvm_lapic_find_highest_irr()

int kvm_lapic_find_highest_irr ( struct kvm_vcpu *  vcpu)

Definition at line 808 of file lapic.c.

809 {
810  /* This may race with setting of irr in __apic_accept_irq() and
811  * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
812  * will cause vmexit immediately and the value will be recalculated
813  * on the next vmentry.
814  */
815  return apic_find_highest_irr(vcpu->arch.apic);
816 }

◆ kvm_lapic_get_base()

u64 kvm_lapic_get_base ( struct kvm_vcpu *  vcpu)

◆ kvm_lapic_get_cr8()

u64 kvm_lapic_get_cr8 ( struct kvm_vcpu *  vcpu)

Definition at line 2521 of file lapic.c.

2522 {
2523  u64 tpr;
2524 
2525  tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2526 
2527  return (tpr & 0xf0) >> 4;
2528 }
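
CR8 exposes only the upper nibble of the 8-bit task-priority register: this helper shifts APIC_TASKPRI right by four, and kvm_lapic_set_tpr() below applies the inverse shift when the guest writes CR8. A standalone sketch of the mapping:

#include <stdio.h>

/* CR8 holds TPR bits 7:4; the low nibble of the TPR is not visible through CR8. */
static unsigned long tpr_to_cr8(unsigned int tpr)  { return (tpr & 0xf0) >> 4; }
static unsigned int  cr8_to_tpr(unsigned long cr8) { return (cr8 & 0x0f) << 4; }

int main(void)
{
    printf("%lx\n", tpr_to_cr8(0x5c));           /* 5: the low nibble is dropped  */
    printf("%x\n",  cr8_to_tpr(0x5));            /* 50: CR8 value 5 -> TPR 0x50   */
    printf("%lx\n", tpr_to_cr8(cr8_to_tpr(7)));  /* 7: CR8 round-trips unchanged  */
    return 0;
}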

◆ kvm_lapic_get_reg()

static u32 kvm_lapic_get_reg ( struct kvm_lapic apic,
int  reg_off 
)
inlinestatic

Definition at line 179 of file lapic.h.

180 {
181  return __kvm_lapic_get_reg(apic->regs, reg_off);
182 }

◆ kvm_lapic_hv_timer_in_use()

bool kvm_lapic_hv_timer_in_use ( struct kvm_vcpu *  vcpu)

Definition at line 2083 of file lapic.c.

2084 {
2085  if (!lapic_in_kernel(vcpu))
2086  return false;
2087 
2088  return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2089 }

◆ kvm_lapic_latched_init()

static int kvm_lapic_latched_init ( struct kvm_vcpu *  vcpu)
inlinestatic

Definition at line 248 of file lapic.h.

249 {
250  return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
251 }

◆ kvm_lapic_readable_reg_mask()

u64 kvm_lapic_readable_reg_mask ( struct kvm_lapic apic)

Definition at line 1607 of file lapic.c.

1608 {
1609  /* Leave bits '0' for reserved and write-only registers. */
1610  u64 valid_reg_mask =
1611  APIC_REG_MASK(APIC_ID) |
1612  APIC_REG_MASK(APIC_LVR) |
1613  APIC_REG_MASK(APIC_TASKPRI) |
1614  APIC_REG_MASK(APIC_PROCPRI) |
1615  APIC_REG_MASK(APIC_LDR) |
1616  APIC_REG_MASK(APIC_SPIV) |
1617  APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1618  APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1619  APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1620  APIC_REG_MASK(APIC_ESR) |
1621  APIC_REG_MASK(APIC_ICR) |
1622  APIC_REG_MASK(APIC_LVTT) |
1623  APIC_REG_MASK(APIC_LVTTHMR) |
1624  APIC_REG_MASK(APIC_LVTPC) |
1625  APIC_REG_MASK(APIC_LVT0) |
1626  APIC_REG_MASK(APIC_LVT1) |
1627  APIC_REG_MASK(APIC_LVTERR) |
1628  APIC_REG_MASK(APIC_TMICT) |
1629  APIC_REG_MASK(APIC_TMCCT) |
1630  APIC_REG_MASK(APIC_TDCR);
1631 
1632  if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1633  valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);
1634 
1635  /* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
1636  if (!apic_x2apic_mode(apic))
1637  valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
1638  APIC_REG_MASK(APIC_DFR) |
1639  APIC_REG_MASK(APIC_ICR2);
1640 
1641  return valid_reg_mask;
1642 }

◆ kvm_lapic_reset()

void kvm_lapic_reset ( struct kvm_vcpu *  vcpu,
bool  init_event 
)

Definition at line 2669 of file lapic.c.

2670 {
2671  struct kvm_lapic *apic = vcpu->arch.apic;
2672  u64 msr_val;
2673  int i;
2674 
2675  static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
2676 
2677  if (!init_event) {
2678  msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2679  if (kvm_vcpu_is_reset_bsp(vcpu))
2680  msr_val |= MSR_IA32_APICBASE_BSP;
2681  kvm_lapic_set_base(vcpu, msr_val);
2682  }
2683 
2684  if (!apic)
2685  return;
2686 
2687  /* Stop the timer in case it's a reset to an active apic */
2688  hrtimer_cancel(&apic->lapic_timer.timer);
2689 
2690  /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2691  if (!init_event)
2692  kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2693  kvm_apic_set_version(apic->vcpu);
2694 
2695  for (i = 0; i < apic->nr_lvt_entries; i++)
2696  kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2697  apic_update_lvtt(apic);
2698  if (kvm_vcpu_is_reset_bsp(vcpu) &&
2699  kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2700  kvm_lapic_set_reg(apic, APIC_LVT0,
2701  SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2702  apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2703 
2704  kvm_apic_set_dfr(apic, 0xffffffffU);
2705  apic_set_spiv(apic, 0xff);
2706  kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2707  if (!apic_x2apic_mode(apic))
2708  kvm_apic_set_ldr(apic, 0);
2709  kvm_lapic_set_reg(apic, APIC_ESR, 0);
2710  if (!apic_x2apic_mode(apic)) {
2711  kvm_lapic_set_reg(apic, APIC_ICR, 0);
2712  kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2713  } else {
2714  kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2715  }
2716  kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2717  kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2718  for (i = 0; i < 8; i++) {
2719  kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2720  kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2721  kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2722  }
2723  kvm_apic_update_apicv(vcpu);
2724  update_divide_count(apic);
2725  atomic_set(&apic->lapic_timer.pending, 0);
2726 
2727  vcpu->arch.pv_eoi.msr_val = 0;
2728  apic_update_ppr(apic);
2729  if (apic->apicv_active) {
2730  static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
2731  static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
2732  static_call_cond(kvm_x86_hwapic_isr_update)(-1);
2733  }
2734 
2735  vcpu->arch.apic_arb_prio = 0;
2736  vcpu->arch.apic_attention = 0;
2737  vcpu->arch.apic_attention = 0;
2738  kvm_recalculate_apic_map(vcpu->kvm);
2739 }

◆ kvm_lapic_restart_hv_timer()

void kvm_lapic_restart_hv_timer ( struct kvm_vcpu *  vcpu)

Definition at line 2208 of file lapic.c.

2209 {
2210  struct kvm_lapic *apic = vcpu->arch.apic;
2211 
2212  WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2213  restart_apic_timer(apic);
2214 }

◆ kvm_lapic_set_base()

void kvm_lapic_set_base ( struct kvm_vcpu *  vcpu,
u64  value 
)

Definition at line 2530 of file lapic.c.

2531 {
2532  u64 old_value = vcpu->arch.apic_base;
2533  struct kvm_lapic *apic = vcpu->arch.apic;
2534 
2535  vcpu->arch.apic_base = value;
2536 
2537  if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2538  kvm_update_cpuid_runtime(vcpu);
2539 
2540  if (!apic)
2541  return;
2542 
2543  /* update jump label if enable bit changes */
2544  if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2545  if (value & MSR_IA32_APICBASE_ENABLE) {
2546  kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2547  static_branch_slow_dec_deferred(&apic_hw_disabled);
2548  /* Check if there are APF page ready requests pending */
2549  kvm_make_request(KVM_REQ_APF_READY, vcpu);
2550  } else {
2551  static_branch_inc(&apic_hw_disabled.key);
2552  atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2553  }
2554  }
2555 
2556  if ((old_value ^ value) & X2APIC_ENABLE) {
2557  if (value & X2APIC_ENABLE)
2558  kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2559  else if (value & MSR_IA32_APICBASE_ENABLE)
2560  kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2561  }
2562 
2563  if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2564  kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2565  static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
2566  }
2567 
2568  apic->base_address = apic->vcpu->arch.apic_base &
2569  MSR_IA32_APICBASE_BASE;
2570 
2571  if ((value & MSR_IA32_APICBASE_ENABLE) &&
2572  apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2573  kvm_set_apicv_inhibit(apic->vcpu->kvm,
2574  APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2575  }
2576 }

◆ kvm_lapic_set_eoi()

void kvm_lapic_set_eoi ( struct kvm_vcpu *  vcpu)

Definition at line 2439 of file lapic.c.

2440 {
2441  kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2442 }

◆ kvm_lapic_set_irr()

static void kvm_lapic_set_irr ( int  vec,
struct kvm_lapic apic 
)
inlinestatic

Definition at line 164 of file lapic.h.

165 {
166  kvm_lapic_set_vector(vec, apic->regs + APIC_IRR);
167  /*
168  * irr_pending must be true if any interrupt is pending; set it after
169  * APIC_IRR to avoid race with apic_clear_irr
170  */
171  apic->irr_pending = true;
172 }

◆ kvm_lapic_set_pv_eoi()

int kvm_lapic_set_pv_eoi ( struct kvm_vcpu *  vcpu,
u64  data,
unsigned long  len 
)

Definition at line 3237 of file lapic.c.

3238 {
3239  u64 addr = data & ~KVM_MSR_ENABLED;
3240  struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3241  unsigned long new_len;
3242  int ret;
3243 
3244  if (!IS_ALIGNED(addr, 4))
3245  return 1;
3246 
3247  if (data & KVM_MSR_ENABLED) {
3248  if (addr == ghc->gpa && len <= ghc->len)
3249  new_len = ghc->len;
3250  else
3251  new_len = len;
3252 
3253  ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
3254  if (ret)
3255  return ret;
3256  }
3257 
3258  vcpu->arch.pv_eoi.msr_val = data;
3259 
3260  return 0;
3261 }
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len)
Definition: kvm_main.c:3532
Here is the call graph for this function:
Here is the caller graph for this function:
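For orientation, here is a minimal stand-alone sketch (not KVM code) of the check the listing applies to the PV EOI MSR payload: bit 0 (KVM_MSR_ENABLED) turns the feature on, and the remaining bits carry a guest address that must be 4-byte aligned. The helper name and test values are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_MSR_ENABLED 1ULL

    /* Mirrors the alignment test in kvm_lapic_set_pv_eoi(). */
    static int pv_eoi_msr_valid(uint64_t data)
    {
        uint64_t addr = data & ~KVM_MSR_ENABLED;

        return (addr & 3) == 0;    /* IS_ALIGNED(addr, 4) in the kernel */
    }

    int main(void)
    {
        printf("%d\n", pv_eoi_msr_valid(0x1000 | KVM_MSR_ENABLED)); /* 1: accepted */
        printf("%d\n", pv_eoi_msr_valid(0x1002 | KVM_MSR_ENABLED)); /* 0: misaligned */
        return 0;
    }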

◆ kvm_lapic_set_tpr()

void kvm_lapic_set_tpr ( struct kvm_vcpu *  vcpu,
unsigned long  cr8 
)

Definition at line 2516 of file lapic.c.

2517 {
2518  apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2519 }
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
Definition: lapic.c:981
Here is the call graph for this function:
Here is the caller graph for this function:
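A minimal stand-alone sketch of the CR8-to-TPR mapping used here (and reversed by kvm_lapic_get_cr8()): the 4-bit priority class held in CR8 occupies bits 7:4 of the APIC TPR. The example value is arbitrary.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long cr8 = 0x9;
        uint32_t tpr = (cr8 & 0x0f) << 4;    /* what apic_set_tpr() receives */

        printf("cr8=%#lx -> tpr=%#x -> cr8=%#x\n", cr8, tpr, tpr >> 4);
        return 0;
    }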

◆ kvm_lapic_set_vapic_addr()

int kvm_lapic_set_vapic_addr ( struct kvm_vcpu *  vcpu,
gpa_t  vapic_addr 
)

Definition at line 3139 of file lapic.c.

3140 {
3141  if (vapic_addr) {
3142  if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
3143  &vcpu->arch.apic->vapic_cache,
3144  vapic_addr, sizeof(u32)))
3145  return -EINVAL;
3146  __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3147  } else {
3148  __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3149  }
3150 
3151  vcpu->arch.apic->vapic_addr = vapic_addr;
3152  return 0;
3153 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_lapic_set_vector()

static void kvm_lapic_set_vector ( int  vec,
void *  bitmap 
)
inline static

Definition at line 159 of file lapic.h.

160 {
161  set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
162 }
Here is the caller graph for this function:
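The bit placement follows the VEC_POS()/REG_POS() macros from this header: each group of 32 vectors maps to one 32-bit register placed on a 16-byte boundary, so REG_POS() yields a byte offset and VEC_POS() a bit index within that word. A stand-alone sketch reusing the same formulas, with an assumed example vector:

    #include <stdio.h>

    #define VEC_POS(v) ((v) & (32 - 1))
    #define REG_POS(v) (((v) >> 5) << 4)

    int main(void)
    {
        int vec = 0x31;    /* example vector 49 */

        printf("vector %#x -> register offset %#x, bit %d\n",
               vec, REG_POS(vec), VEC_POS(vec));
        return 0;
    }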

◆ kvm_lapic_switch_to_hv_timer()

void kvm_lapic_switch_to_hv_timer ( struct kvm_vcpu *  vcpu)

Definition at line 2192 of file lapic.c.

2193 {
2194  restart_apic_timer(vcpu->arch.apic);
2195 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_lapic_switch_to_sw_timer()

void kvm_lapic_switch_to_sw_timer ( struct kvm_vcpu *  vcpu)

Definition at line 2197 of file lapic.c.

2198 {
2199  struct kvm_lapic *apic = vcpu->arch.apic;
2200 
2201  preempt_disable();
2202  /* Possibly the TSC deadline timer is not enabled yet */
2203  if (apic->lapic_timer.hv_timer_in_use)
2204  start_sw_timer(apic);
2205  preempt_enable();
2206 }
static void start_sw_timer(struct kvm_lapic *apic)
Definition: lapic.c:2141
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_lapic_sync_from_vapic()

void kvm_lapic_sync_from_vapic ( struct kvm_vcpu *  vcpu)

Definition at line 3072 of file lapic.c.

3073 {
3074  u32 data;
3075 
3076  if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3077  apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3078 
3079  if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3080  return;
3081 
3082  if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3083  sizeof(u32)))
3084  return;
3085 
3086  apic_set_tpr(vcpu->arch.apic, data & 0xff);
3087 }
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
Definition: kvm_main.c:3608
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, struct kvm_lapic *apic)
Definition: lapic.c:3049
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_lapic_sync_to_vapic()

void kvm_lapic_sync_to_vapic ( struct kvm_vcpu *  vcpu)

Definition at line 3115 of file lapic.c.

3116 {
3117  u32 data, tpr;
3118  int max_irr, max_isr;
3119  struct kvm_lapic *apic = vcpu->arch.apic;
3120 
3121  apic_sync_pv_eoi_to_guest(vcpu, apic);
3122 
3123  if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3124  return;
3125 
3126  tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3127  max_irr = apic_find_highest_irr(apic);
3128  if (max_irr < 0)
3129  max_irr = 0;
3130  max_isr = apic_find_highest_isr(apic);
3131  if (max_isr < 0)
3132  max_isr = 0;
3133  data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
3134 
3135  kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3136  sizeof(u32));
3137 }
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len)
Definition: kvm_main.c:3571
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, struct kvm_lapic *apic)
Definition: lapic.c:3095
Here is the call graph for this function:
Here is the caller graph for this function:
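As a stand-alone sketch (not KVM code) of the 32-bit word the listing writes to the vAPIC page -- TPR in bits 7:0, the masked priority class of the highest in-service vector in bits 15:8, and the highest requested vector in bits 31:24 -- with assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tpr = 0x20, max_isr = 0x35, max_irr = 0x41;
        uint32_t data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

        printf("vapic word = %#010x\n", data);    /* 0x41003020 */
        printf("tpr=%#x isr_class=%#x irr=%#x\n",
               data & 0xff, (data >> 8) & 0xff, data >> 24);
        return 0;
    }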

◆ kvm_lowest_prio_delivery()

static bool kvm_lowest_prio_delivery ( struct kvm_lapic_irq *  irq)
inline static

Definition at line 242 of file lapic.h.

243 {
244  return (irq->delivery_mode == APIC_DM_LOWEST ||
245  irq->msi_redir_hint);
246 }
Here is the caller graph for this function:

◆ kvm_recalculate_apic_map()

void kvm_recalculate_apic_map ( struct kvm *  kvm)

Definition at line 374 of file lapic.c.

375 {
376  struct kvm_apic_map *new, *old = NULL;
377  struct kvm_vcpu *vcpu;
378  unsigned long i;
379  u32 max_id = 255; /* enough space for any xAPIC ID */
380  bool xapic_id_mismatch;
381  int r;
382 
383  /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
384  if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
385  return;
386 
387  WARN_ONCE(!irqchip_in_kernel(kvm),
388  "Dirty APIC map without an in-kernel local APIC");
389 
390  mutex_lock(&kvm->arch.apic_map_lock);
391 
392 retry:
393  /*
394  * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
395  * or the APIC registers (if dirty). Note, on retry the map may have
396  * not yet been marked dirty by whatever task changed a vCPU's x2APIC
397  * ID, i.e. the map may still show up as in-progress. In that case
398  * this task still needs to retry and complete its calculation.
399  */
400  if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
401  DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
402  /* Someone else has updated the map. */
403  mutex_unlock(&kvm->arch.apic_map_lock);
404  return;
405  }
406 
407  /*
408  * Reset the mismatch flag between attempts so that KVM does the right
409  * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
410  * keep max_id strictly increasing. Disallowing max_id from shrinking
411  * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
412  * with the highest x2APIC ID is toggling its APIC on and off.
413  */
414  xapic_id_mismatch = false;
415 
416  kvm_for_each_vcpu(i, vcpu, kvm)
417  if (kvm_apic_present(vcpu))
418  max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
419 
420  new = kvzalloc(sizeof(struct kvm_apic_map) +
421  sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
422  GFP_KERNEL_ACCOUNT);
423 
424  if (!new)
425  goto out;
426 
427  new->max_apic_id = max_id;
428  new->logical_mode = KVM_APIC_MODE_SW_DISABLED;
429 
430  kvm_for_each_vcpu(i, vcpu, kvm) {
431  if (!kvm_apic_present(vcpu))
432  continue;
433 
434  r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
435  if (r) {
436  kvfree(new);
437  new = NULL;
438  if (r == -E2BIG) {
439  cond_resched();
440  goto retry;
441  }
442 
443  goto out;
444  }
445 
446  kvm_recalculate_logical_map(new, vcpu);
447  }
448 out:
449  /*
450  * The optimized map is effectively KVM's internal version of APICv,
451  * and all unwanted aliasing that results in disabling the optimized
452  * map also applies to APICv.
453  */
454  if (!new)
455  kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
456  else
457  kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
458 
459  if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
460  kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
461  else
462  kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
463 
464  if (xapic_id_mismatch)
465  kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
466  else
467  kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
468 
469  old = rcu_dereference_protected(kvm->arch.apic_map,
470  lockdep_is_held(&kvm->arch.apic_map_lock));
471  rcu_assign_pointer(kvm->arch.apic_map, new);
472  /*
473  * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
474  * If another update has come in, leave it DIRTY.
475  */
476  atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
477  UPDATE_IN_PROGRESS, CLEAN);
478  mutex_unlock(&kvm->arch.apic_map_lock);
479 
480  if (old)
481  call_rcu(&old->rcu, kvm_apic_map_free);
482 
483  kvm_make_scan_ioapic_request(kvm);
484 }
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392
@ CLEAN
Definition: lapic.c:369
@ UPDATE_IN_PROGRESS
Definition: lapic.c:370
static int kvm_recalculate_phys_map(struct kvm_apic_map *new, struct kvm_vcpu *vcpu, bool *xapic_id_mismatch)
Definition: lapic.c:218
static void kvm_recalculate_logical_map(struct kvm_apic_map *new, struct kvm_vcpu *vcpu)
Definition: lapic.c:294
static u32 kvm_x2apic_id(struct kvm_lapic *apic)
Definition: lapic.c:143
static void kvm_apic_map_free(struct rcu_head *rcu)
Definition: lapic.c:211
void kvm_make_scan_ioapic_request(struct kvm *kvm)
Definition: x86.c:10520
Here is the call graph for this function:
Here is the caller graph for this function:
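The CLEAN/UPDATE_IN_PROGRESS/DIRTY handshake above can be pictured with a simplified user-space analogue built on C11 atomics. This is only a sketch of the idea -- it omits the retry path, the mutex and the map rebuild -- and is not the kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { CLEAN, UPDATE_IN_PROGRESS, DIRTY };    /* same ordering as lapic.c */

    static _Atomic int map_state = DIRTY;

    static void recalculate(void)
    {
        int expected = DIRTY;

        /* Claim the update; bail out if the map is not actually dirty. */
        if (!atomic_compare_exchange_strong_explicit(&map_state, &expected,
                                                     UPDATE_IN_PROGRESS,
                                                     memory_order_acquire,
                                                     memory_order_relaxed))
            return;

        /* ... rebuild the optimized map here ... */

        /* Publish; if a writer re-dirtied the map meanwhile, leave it DIRTY. */
        expected = UPDATE_IN_PROGRESS;
        atomic_compare_exchange_strong_explicit(&map_state, &expected, CLEAN,
                                                memory_order_release,
                                                memory_order_relaxed);
    }

    int main(void)
    {
        recalculate();
        printf("state=%d (0 == CLEAN)\n", atomic_load(&map_state));
        return 0;
    }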

◆ kvm_set_apic_base()

int kvm_set_apic_base ( struct kvm_vcpu *  vcpu,
struct msr_data *  msr_info 
)

Definition at line 485 of file x86.c.

486 {
487  enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
488  enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
489  u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
490  (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
491 
492  if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
493  return 1;
494  if (!msr_info->host_initiated) {
495  if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
496  return 1;
497  if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
498  return 1;
499  }
500 
501  kvm_lapic_set_base(vcpu, msr_info->data);
502  kvm_recalculate_apic_map(vcpu->kvm);
503  return 0;
504 }
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
Definition: cpuid.c:409
lapic_mode
Definition: lapic.h:25
enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
Definition: x86.c:479
Here is the call graph for this function:
Here is the caller graph for this function:
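A stand-alone sketch of the guest-visible transition rules enforced above (host-initiated writes bypass them): x2APIC cannot be switched directly back to xAPIC, and x2APIC cannot be entered from the disabled state. The enum mirrors only the names of lapic_mode, not its encoded values, and the helper is invented for the example.

    #include <stdio.h>

    enum lapic_mode { MODE_DISABLED, MODE_INVALID, MODE_XAPIC, MODE_X2APIC };

    static int guest_transition_allowed(enum lapic_mode old_mode,
                                        enum lapic_mode new_mode)
    {
        if (new_mode == MODE_INVALID)
            return 0;
        if (old_mode == MODE_X2APIC && new_mode == MODE_XAPIC)
            return 0;
        if (old_mode == MODE_DISABLED && new_mode == MODE_X2APIC)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", guest_transition_allowed(MODE_XAPIC, MODE_X2APIC));    /* 1 */
        printf("%d\n", guest_transition_allowed(MODE_X2APIC, MODE_XAPIC));    /* 0 */
        printf("%d\n", guest_transition_allowed(MODE_DISABLED, MODE_X2APIC)); /* 0 */
        return 0;
    }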

◆ kvm_set_lapic_tscdeadline_msr()

void kvm_set_lapic_tscdeadline_msr ( struct kvm_vcpu *  vcpu,
u64  data 
)

Definition at line 2504 of file lapic.c.

2505 {
2506  struct kvm_lapic *apic = vcpu->arch.apic;
2507 
2508  if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2509  return;
2510 
2511  hrtimer_cancel(&apic->lapic_timer.timer);
2512  apic->lapic_timer.tscdeadline = data;
2513  start_apic_timer(apic);
2514 }
static void start_apic_timer(struct kvm_lapic *apic)
Definition: lapic.c:2227
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vcpu_apicv_active()

static bool kvm_vcpu_apicv_active ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 226 of file lapic.h.

227 {
228  return lapic_in_kernel(vcpu) && vcpu->arch.apic->apicv_active;
229 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_vector_to_index()

int kvm_vector_to_index ( u32  vector,
u32  dest_vcpus,
const unsigned long *  bitmap,
u32  bitmap_size 
)

Definition at line 1092 of file lapic.c.

1094 {
1095  u32 mod;
1096  int i, idx = -1;
1097 
1098  mod = vector % dest_vcpus;
1099 
1100  for (i = 0; i <= mod; i++) {
1101  idx = find_next_bit(bitmap, bitmap_size, idx + 1);
1102  BUG_ON(idx == bitmap_size);
1103  }
1104 
1105  return idx;
1106 }
Here is the caller graph for this function:
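To see the round-robin effect, here is a stand-alone re-implementation over a single-word bitmap: the vector is hashed with vector % dest_vcpus and that many set bits are skipped, so successive vectors are spread across the candidate vCPUs. find_next_bit32() is a local stand-in for the kernel's find_next_bit(), and the destination set is an assumed example.

    #include <stdio.h>

    static int find_next_bit32(unsigned long bitmap, int size, int start)
    {
        for (int i = start; i < size; i++)
            if (bitmap & (1UL << i))
                return i;
        return size;
    }

    static int vector_to_index(unsigned vector, unsigned dest_vcpus,
                               unsigned long bitmap, int bitmap_size)
    {
        int idx = -1;

        for (unsigned i = 0; i <= vector % dest_vcpus; i++)
            idx = find_next_bit32(bitmap, bitmap_size, idx + 1);
        return idx;
    }

    int main(void)
    {
        unsigned long dest = 0x2c;    /* vCPUs 2, 3 and 5 are candidates */

        for (unsigned vec = 32; vec < 36; vec++)
            printf("vector %u -> vcpu index %d\n",
                   vec, vector_to_index(vec, 3, dest, 32));
        return 0;
    }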

◆ kvm_wait_lapic_expire()

void kvm_wait_lapic_expire ( struct kvm_vcpu *  vcpu)

Definition at line 1869 of file lapic.c.

1870 {
1871  if (lapic_in_kernel(vcpu) &&
1872  vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1873  vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1874  lapic_timer_int_injected(vcpu))
1875  __kvm_wait_lapic_expire(vcpu);
1876 }
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
Definition: lapic.c:1777
static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
Definition: lapic.c:1844
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_x2apic_icr_write()

int kvm_x2apic_icr_write ( struct kvm_lapic *  apic,
u64  data 
)

Definition at line 3155 of file lapic.c.

3156 {
3157  data &= ~APIC_ICR_BUSY;
3158 
3159  kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
3160  kvm_lapic_set_reg64(apic, APIC_ICR, data);
3161  trace_kvm_apic_write(APIC_ICR, data);
3162  return 0;
3163 }
void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
Definition: lapic.c:1504
#define trace_kvm_apic_write(reg, val)
Definition: trace.h:281
Here is the call graph for this function:
Here is the caller graph for this function:
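A stand-alone sketch of how a 64-bit x2APIC ICR value splits into the icr_low/icr_high pair passed to kvm_apic_send_ipi(), with the BUSY bit cleared as in the listing because x2APIC has no delivery-status semantics; the destination and vector are assumed example values.

    #include <stdint.h>
    #include <stdio.h>

    #define APIC_ICR_BUSY (1u << 12)

    int main(void)
    {
        /* Fixed delivery, vector 0xd1, destination x2APIC ID 7. */
        uint64_t icr = ((uint64_t)7 << 32) | APIC_ICR_BUSY | 0xd1;

        icr &= ~(uint64_t)APIC_ICR_BUSY;
        printf("icr_low=%#x icr_high=%#x\n",
               (uint32_t)icr, (uint32_t)(icr >> 32));
        return 0;
    }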

◆ kvm_x2apic_msr_read()

int kvm_x2apic_msr_read ( struct kvm_vcpu *  vcpu,
u32  msr,
u64 *  data 
)

Definition at line 3210 of file lapic.c.

3211 {
3212  struct kvm_lapic *apic = vcpu->arch.apic;
3213  u32 reg = (msr - APIC_BASE_MSR) << 4;
3214 
3215  if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3216  return 1;
3217 
3218  return kvm_lapic_msr_read(apic, reg, data);
3219 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_x2apic_msr_write()

int kvm_x2apic_msr_write ( struct kvm_vcpu *  vcpu,
u32  msr,
u64  data 
)

Definition at line 3199 of file lapic.c.

3200 {
3201  struct kvm_lapic *apic = vcpu->arch.apic;
3202  u32 reg = (msr - APIC_BASE_MSR) << 4;
3203 
3204  if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3205  return 1;
3206 
3207  return kvm_lapic_msr_write(apic, reg, data);
3208 }
Here is the call graph for this function:
Here is the caller graph for this function:
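Both kvm_x2apic_msr_read() and kvm_x2apic_msr_write() translate the MSR number into an xAPIC-style register offset with reg = (msr - APIC_BASE_MSR) << 4, where APIC_BASE_MSR is 0x800. A stand-alone sketch with a few well-known x2APIC MSRs:

    #include <stdio.h>

    #define APIC_BASE_MSR 0x800

    int main(void)
    {
        unsigned msrs[] = { 0x802 /* x2APIC ID */, 0x808 /* TPR */,
                            0x80b /* EOI */, 0x830 /* ICR */ };

        for (unsigned i = 0; i < sizeof(msrs) / sizeof(msrs[0]); i++)
            printf("MSR %#x -> APIC register offset %#x\n",
                   msrs[i], (msrs[i] - APIC_BASE_MSR) << 4);
        return 0;
    }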

◆ kvm_xapic_id()

static u8 kvm_xapic_id ( struct kvm_lapic *  apic)
inline static

Definition at line 276 of file lapic.h.

277 {
278  return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
279 }
Here is the caller graph for this function:
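A stand-alone note on the shift: in xAPIC mode the 8-bit APIC ID sits in bits 31:24 of the APIC_ID register, while kvm_x2apic_id() (lapic.c) reads the full 32-bit register in x2APIC mode. The register value below is an assumed example.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t apic_id_reg = 0x05000000;    /* xAPIC ID 5 as encoded by hardware */

        printf("xAPIC ID = %u\n", apic_id_reg >> 24);
        return 0;
    }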

◆ lapic_in_kernel()

static bool lapic_in_kernel ( struct kvm_vcpu *  vcpu)
inline static

Definition at line 186 of file lapic.h.

187 {
188  if (static_branch_unlikely(&kvm_has_noapic_vcpu))
189  return vcpu->arch.apic;
190  return true;
191 }

Variable Documentation

◆ apic_hw_disabled

struct static_key_false_deferred apic_hw_disabled
extern

◆ apic_sw_disabled

struct static_key_false_deferred apic_sw_disabled
extern