KVM
arm_arch_timer.h File Reference
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
Include dependency graph for arm_arch_timer.h:
This graph shows which files directly or indirectly include this file:


Classes

struct  arch_timer_offset
 
struct  arch_timer_vm_data
 
struct  arch_timer_context
 
struct  timer_map
 
struct  arch_timer_cpu
 

Macros

#define vcpu_timer(v)   (&(v)->arch.timer_cpu)
 
#define vcpu_get_timer(v, t)   (&vcpu_timer(v)->timers[(t)])
 
#define vcpu_vtimer(v)   (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
 
#define vcpu_ptimer(v)   (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
 
#define vcpu_hvtimer(v)   (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
 
#define vcpu_hptimer(v)   (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
 
#define arch_timer_ctx_index(ctx)   ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
 
#define timer_vm_data(ctx)   (&(ctx)->vcpu->kvm->arch.timer_data)
 
#define timer_irq(ctx)   (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
 

Enumerations

enum  kvm_arch_timers {
  TIMER_PTIMER , TIMER_VTIMER , NR_KVM_EL0_TIMERS , TIMER_HVTIMER = NR_KVM_EL0_TIMERS ,
  TIMER_HPTIMER , NR_KVM_TIMERS
}
 
enum  kvm_arch_timer_regs {
  TIMER_REG_CNT , TIMER_REG_CVAL , TIMER_REG_TVAL , TIMER_REG_CTL ,
  TIMER_REG_VOFF
}
 

Functions

void get_timer_map (struct kvm_vcpu *vcpu, struct timer_map *map)
 
int __init kvm_timer_hyp_init (bool has_gic)
 
int kvm_timer_enable (struct kvm_vcpu *vcpu)
 
void kvm_timer_vcpu_reset (struct kvm_vcpu *vcpu)
 
void kvm_timer_vcpu_init (struct kvm_vcpu *vcpu)
 
void kvm_timer_sync_user (struct kvm_vcpu *vcpu)
 
bool kvm_timer_should_notify_user (struct kvm_vcpu *vcpu)
 
void kvm_timer_update_run (struct kvm_vcpu *vcpu)
 
void kvm_timer_vcpu_terminate (struct kvm_vcpu *vcpu)
 
void kvm_timer_init_vm (struct kvm *kvm)
 
u64 kvm_arm_timer_get_reg (struct kvm_vcpu *, u64 regid)
 
int kvm_arm_timer_set_reg (struct kvm_vcpu *, u64 regid, u64 value)
 
int kvm_arm_timer_set_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
int kvm_arm_timer_get_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
int kvm_arm_timer_has_attr (struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 
u64 kvm_phys_timer_read (void)
 
void kvm_timer_vcpu_load (struct kvm_vcpu *vcpu)
 
void kvm_timer_vcpu_put (struct kvm_vcpu *vcpu)
 
void kvm_timer_init_vhe (void)
 
u64 kvm_arm_timer_read_sysreg (struct kvm_vcpu *vcpu, enum kvm_arch_timers tmr, enum kvm_arch_timer_regs treg)
 
void kvm_arm_timer_write_sysreg (struct kvm_vcpu *vcpu, enum kvm_arch_timers tmr, enum kvm_arch_timer_regs treg, u64 val)
 
u32 timer_get_ctl (struct arch_timer_context *ctxt)
 
u64 timer_get_cval (struct arch_timer_context *ctxt)
 
void kvm_timer_cpu_up (void)
 
void kvm_timer_cpu_down (void)
 
static bool has_cntpoff (void)
 

Macro Definition Documentation

◆ arch_timer_ctx_index

#define arch_timer_ctx_index (   ctx)    ((ctx) - vcpu_timer((ctx)->vcpu)->timers)

Definition at line 129 of file arm_arch_timer.h.

◆ timer_irq

#define timer_irq (   ctx)    (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])

Definition at line 132 of file arm_arch_timer.h.

◆ timer_vm_data

#define timer_vm_data (   ctx)    (&(ctx)->vcpu->kvm->arch.timer_data)

Definition at line 131 of file arm_arch_timer.h.

◆ vcpu_get_timer

#define vcpu_get_timer (   v,
  t 
)    (&vcpu_timer(v)->timers[(t)])

Definition at line 123 of file arm_arch_timer.h.

◆ vcpu_hptimer

#define vcpu_hptimer (   v)    (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])

Definition at line 127 of file arm_arch_timer.h.

◆ vcpu_hvtimer

#define vcpu_hvtimer (   v)    (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])

Definition at line 126 of file arm_arch_timer.h.

◆ vcpu_ptimer

#define vcpu_ptimer (   v)    (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])

Definition at line 125 of file arm_arch_timer.h.

◆ vcpu_timer

#define vcpu_timer (   v)    (&(v)->arch.timer_cpu)

Definition at line 122 of file arm_arch_timer.h.

◆ vcpu_vtimer

#define vcpu_vtimer (   v)    (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])

Definition at line 124 of file arm_arch_timer.h.
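Taken together, these accessors compose: vcpu_vtimer(v) is just vcpu_get_timer(v, TIMER_VTIMER), arch_timer_ctx_index() recovers the enum kvm_arch_timers index of a context by pointer arithmetic against the per-vCPU timers[] array, and timer_irq() uses that index to look up the PPI in the per-VM arch_timer_vm_data. A minimal kernel-context sketch (the helper name is hypothetical, purely illustrative):

/* Illustrative only: round-tripping the accessor macros (hypothetical helper). */
static u32 example_vtimer_ppi(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctx = vcpu_vtimer(vcpu);

	/* Same context either way; arch_timer_ctx_index(ctx) == TIMER_VTIMER. */
	WARN_ON(ctx != vcpu_get_timer(vcpu, TIMER_VTIMER));

	/* timer_irq() indexes the VM-wide PPI table with that context index. */
	return timer_irq(ctx);
}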

Enumeration Type Documentation

◆ kvm_arch_timer_regs

Enumerator
TIMER_REG_CNT 
TIMER_REG_CVAL 
TIMER_REG_TVAL 
TIMER_REG_CTL 
TIMER_REG_VOFF 

Definition at line 22 of file arm_arch_timer.h.

22 enum kvm_arch_timer_regs {
23  TIMER_REG_CNT,
24  TIMER_REG_CVAL,
25  TIMER_REG_TVAL,
26  TIMER_REG_CTL,
27  TIMER_REG_VOFF,
28 };

◆ kvm_arch_timers

Enumerator
TIMER_PTIMER 
TIMER_VTIMER 
NR_KVM_EL0_TIMERS 
TIMER_HVTIMER 
TIMER_HPTIMER 
NR_KVM_TIMERS 

Definition at line 13 of file arm_arch_timer.h.

13 enum kvm_arch_timers {
14  TIMER_PTIMER,
15  TIMER_VTIMER,
16  NR_KVM_EL0_TIMERS,
17  TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
18  TIMER_HPTIMER,
19  NR_KVM_TIMERS
20 };
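NR_KVM_EL0_TIMERS marks the boundary between the EL1/EL0 timers (TIMER_PTIMER, TIMER_VTIMER) and the EL2 timers (TIMER_HVTIMER, TIMER_HPTIMER), which only exist when the guest uses nested virtualization. A hedged sketch of how code can size its iteration over the timers array accordingly (this mirrors what nr_timers() in arch_timer.c does; the helper below is illustrative, not the kernel's definition):

/* Sketch: number of timer contexts this vCPU actually exposes. */
static int example_nr_timers(struct kvm_vcpu *vcpu)
{
	/* Only nested-virt guests see the EL2 (hyp) timers. */
	return vcpu_has_nv(vcpu) ? NR_KVM_TIMERS : NR_KVM_EL0_TIMERS;
}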

Function Documentation

◆ get_timer_map()

void get_timer_map ( struct kvm_vcpu *  vcpu,
struct timer_map map 
)

Definition at line 178 of file arch_timer.c.

179 {
180  if (vcpu_has_nv(vcpu)) {
181  if (is_hyp_ctxt(vcpu)) {
182  map->direct_vtimer = vcpu_hvtimer(vcpu);
183  map->direct_ptimer = vcpu_hptimer(vcpu);
184  map->emul_vtimer = vcpu_vtimer(vcpu);
185  map->emul_ptimer = vcpu_ptimer(vcpu);
186  } else {
187  map->direct_vtimer = vcpu_vtimer(vcpu);
188  map->direct_ptimer = vcpu_ptimer(vcpu);
189  map->emul_vtimer = vcpu_hvtimer(vcpu);
190  map->emul_ptimer = vcpu_hptimer(vcpu);
191  }
192  } else if (has_vhe()) {
193  map->direct_vtimer = vcpu_vtimer(vcpu);
194  map->direct_ptimer = vcpu_ptimer(vcpu);
195  map->emul_vtimer = NULL;
196  map->emul_ptimer = NULL;
197  } else {
198  map->direct_vtimer = vcpu_vtimer(vcpu);
199  map->direct_ptimer = NULL;
200  map->emul_vtimer = NULL;
201  map->emul_ptimer = vcpu_ptimer(vcpu);
202  }
203 
204  trace_kvm_get_timer_map(vcpu->vcpu_id, map);
205 }
#define vcpu_ptimer(v)
#define vcpu_hvtimer(v)
#define vcpu_vtimer(v)
#define vcpu_hptimer(v)
struct arch_timer_context * direct_vtimer
struct arch_timer_context * direct_ptimer
struct arch_timer_context * emul_ptimer
struct arch_timer_context * emul_vtimer
Here is the caller graph for this function:
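The resulting map tells callers which contexts are backed by the hardware timers in the current configuration (direct_*) and which must be emulated with hrtimers (emul_*); kvm_arm_timer_read_sysreg() and kvm_arm_timer_write_sysreg() below branch on exactly this distinction. A small illustrative helper (hypothetical name, kernel context assumed):

/* Illustrative only: is this context on the emulated path for its vCPU? */
static bool example_ctx_is_emulated(struct kvm_vcpu *vcpu,
				    struct arch_timer_context *ctx)
{
	struct timer_map map;

	get_timer_map(vcpu, &map);
	return ctx == map.emul_vtimer || ctx == map.emul_ptimer;
}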

◆ has_cntpoff()

static bool has_cntpoff ( void  )
inline static

Definition at line 150 of file arm_arch_timer.h.

151 {
152  return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
153 }
Here is the caller graph for this function:

◆ kvm_arm_timer_get_attr()

int kvm_arm_timer_get_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 1611 of file arch_timer.c.

1612 {
1613  int __user *uaddr = (int __user *)(long)attr->addr;
1614  struct arch_timer_context *timer;
1615  int irq;
1616 
1617  switch (attr->attr) {
1618  case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1619  timer = vcpu_vtimer(vcpu);
1620  break;
1621  case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1622  timer = vcpu_ptimer(vcpu);
1623  break;
1624  case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
1625  timer = vcpu_hvtimer(vcpu);
1626  break;
1627  case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
1628  timer = vcpu_hptimer(vcpu);
1629  break;
1630  default:
1631  return -ENXIO;
1632  }
1633 
1634  irq = timer_irq(timer);
1635  return put_user(irq, uaddr);
1636 }
#define timer_irq(ctx)
struct kvm_vcpu * vcpu
struct arch_timer_context::@18 irq
Here is the caller graph for this function:

◆ kvm_arm_timer_get_reg()

u64 kvm_arm_timer_get_reg ( struct kvm_vcpu *  vcpu,
u64  regid 
)

Definition at line 1107 of file arch_timer.c.

1108 {
1109  switch (regid) {
1110  case KVM_REG_ARM_TIMER_CTL:
1111  return kvm_arm_timer_read(vcpu,
1112  vcpu_vtimer(vcpu), TIMER_REG_CTL);
1113  case KVM_REG_ARM_TIMER_CNT:
1114  return kvm_arm_timer_read(vcpu,
1115  vcpu_vtimer(vcpu), TIMER_REG_CNT);
1116  case KVM_REG_ARM_TIMER_CVAL:
1117  return kvm_arm_timer_read(vcpu,
1118  vcpu_vtimer(vcpu), TIMER_REG_CVAL);
1119  case KVM_REG_ARM_PTIMER_CTL:
1120  return kvm_arm_timer_read(vcpu,
1121  vcpu_ptimer(vcpu), TIMER_REG_CTL);
1122  case KVM_REG_ARM_PTIMER_CNT:
1123  return kvm_arm_timer_read(vcpu,
1124  vcpu_ptimer(vcpu), TIMER_REG_CNT);
1125  case KVM_REG_ARM_PTIMER_CVAL:
1126  return kvm_arm_timer_read(vcpu,
1127  vcpu_ptimer(vcpu), TIMER_REG_CVAL);
1128  }
1129  return (u64)-1;
1130 }
static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg)
Definition: arch_timer.c:1132
Here is the call graph for this function:
Here is the caller graph for this function:
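kvm_arm_timer_get_reg() backs KVM_GET_ONE_REG for the timer register IDs, so userspace reads these values through the ordinary ONE_REG interface. A minimal userspace sketch for an arm64 host, assuming a valid vcpu_fd and the uapi headers (linux/kvm.h pulls in the arm64 register ID definitions); error handling is abbreviated:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest's view of the virtual counter (illustrative sketch). */
uint64_t read_guest_vtimer_cnt(int vcpu_fd)
{
	uint64_t cnt = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,	/* guest CNTVCT_EL0 */
		.addr = (uint64_t)&cnt,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return 0;	/* real code should propagate the error */

	return cnt;
}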

◆ kvm_arm_timer_has_attr()

int kvm_arm_timer_has_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 1638 of file arch_timer.c.

1639 {
1640  switch (attr->attr) {
1641  case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1642  case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1643  case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
1644  case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
1645  return 0;
1646  }
1647 
1648  return -ENXIO;
1649 }
Here is the caller graph for this function:

◆ kvm_arm_timer_read_sysreg()

u64 kvm_arm_timer_read_sysreg ( struct kvm_vcpu *  vcpu,
enum kvm_arch_timers  tmr,
enum kvm_arch_timer_regs  treg 
)

Definition at line 1167 of file arch_timer.c.

1170 {
1171  struct arch_timer_context *timer;
1172  struct timer_map map;
1173  u64 val;
1174 
1175  get_timer_map(vcpu, &map);
1176  timer = vcpu_get_timer(vcpu, tmr);
1177 
1178  if (timer == map.emul_vtimer || timer == map.emul_ptimer)
1179  return kvm_arm_timer_read(vcpu, timer, treg);
1180 
1181  preempt_disable();
1182  timer_save_state(timer);
1183 
1184  val = kvm_arm_timer_read(vcpu, timer, treg);
1185 
1186  timer_restore_state(timer);
1187  preempt_enable();
1188 
1189  return val;
1190 }
static void timer_save_state(struct arch_timer_context *ctx)
Definition: arch_timer.c:497
static void timer_restore_state(struct arch_timer_context *ctx)
Definition: arch_timer.c:603
void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
Definition: arch_timer.c:178
#define vcpu_get_timer(v, t)
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_arm_timer_set_attr()

int kvm_arm_timer_set_attr ( struct kvm_vcpu *  vcpu,
struct kvm_device_attr *  attr 
)

Definition at line 1559 of file arch_timer.c.

1560 {
1561  int __user *uaddr = (int __user *)(long)attr->addr;
1562  int irq, idx, ret = 0;
1563 
1564  if (!irqchip_in_kernel(vcpu->kvm))
1565  return -EINVAL;
1566 
1567  if (get_user(irq, uaddr))
1568  return -EFAULT;
1569 
1570  if (!(irq_is_ppi(irq)))
1571  return -EINVAL;
1572 
1573  mutex_lock(&vcpu->kvm->arch.config_lock);
1574 
1575  if (test_bit(KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE,
1576  &vcpu->kvm->arch.flags)) {
1577  ret = -EBUSY;
1578  goto out;
1579  }
1580 
1581  switch (attr->attr) {
1582  case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1583  idx = TIMER_VTIMER;
1584  break;
1585  case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1586  idx = TIMER_PTIMER;
1587  break;
1588  case KVM_ARM_VCPU_TIMER_IRQ_HVTIMER:
1589  idx = TIMER_HVTIMER;
1590  break;
1591  case KVM_ARM_VCPU_TIMER_IRQ_HPTIMER:
1592  idx = TIMER_HPTIMER;
1593  break;
1594  default:
1595  ret = -ENXIO;
1596  goto out;
1597  }
1598 
1599  /*
1600  * We cannot validate the IRQ unicity before we run, so take it at
1601  * face value. The verdict will be given on first vcpu run, for each
1602  * vcpu. Yes this is late. Blame it on the stupid API.
1603  */
1604  vcpu->kvm->arch.timer_data.ppi[idx] = irq;
1605 
1606 out:
1607  mutex_unlock(&vcpu->kvm->arch.config_lock);
1608  return ret;
1609 }
#define irqchip_in_kernel(k)
Definition: arm_vgic.h:392
#define irq_is_ppi(irq)
Definition: arm_vgic.h:34
Here is the caller graph for this function:
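Userspace reaches kvm_arm_timer_set_attr() through the per-vCPU KVM_SET_DEVICE_ATTR ioctl with group KVM_ARM_VCPU_TIMER_CTRL. As the code above shows, it requires an in-kernel irqchip, and once the PPI configuration has been locked (KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE) further changes fail with -EBUSY, so the call must happen before the vCPU first runs. A minimal userspace sketch, assuming a valid vcpu_fd; error handling is abbreviated:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Route the guest's virtual timer to a given PPI (illustrative sketch). */
int set_vtimer_ppi(int vcpu_fd, int ppi)
{
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VCPU_TIMER_CTRL,
		.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
		.addr  = (uint64_t)&ppi,	/* kernel reads an int from here */
	};

	/* Must be called before the first KVM_RUN; the PPIs are frozen afterwards. */
	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}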

◆ kvm_arm_timer_set_reg()

int kvm_arm_timer_set_reg ( struct kvm_vcpu *  vcpu,
u64  regid,
u64  value 
)

Definition at line 1048 of file arch_timer.c.

1049 {
1050  struct arch_timer_context *timer;
1051 
1052  switch (regid) {
1053  case KVM_REG_ARM_TIMER_CTL:
1054  timer = vcpu_vtimer(vcpu);
1055  kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
1056  break;
1057  case KVM_REG_ARM_TIMER_CNT:
1058  if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
1059  &vcpu->kvm->arch.flags)) {
1060  timer = vcpu_vtimer(vcpu);
1061  timer_set_offset(timer, kvm_phys_timer_read() - value);
1062  }
1063  break;
1064  case KVM_REG_ARM_TIMER_CVAL:
1065  timer = vcpu_vtimer(vcpu);
1066  kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
1067  break;
1068  case KVM_REG_ARM_PTIMER_CTL:
1069  timer = vcpu_ptimer(vcpu);
1070  kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
1071  break;
1072  case KVM_REG_ARM_PTIMER_CNT:
1073  if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET,
1074  &vcpu->kvm->arch.flags)) {
1075  timer = vcpu_ptimer(vcpu);
1076  timer_set_offset(timer, kvm_phys_timer_read() - value);
1077  }
1078  break;
1079  case KVM_REG_ARM_PTIMER_CVAL:
1080  timer = vcpu_ptimer(vcpu);
1081  kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
1082  break;
1083 
1084  default:
1085  return -1;
1086  }
1087 
1088  return 0;
1089 }
static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
Definition: arch_timer.c:163
u64 kvm_phys_timer_read(void)
Definition: arch_timer.c:173
static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, struct arch_timer_context *timer, enum kvm_arch_timer_regs treg, u64 val)
Definition: arch_timer.c:1192
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_arm_timer_write_sysreg()

void kvm_arm_timer_write_sysreg ( struct kvm_vcpu *  vcpu,
enum kvm_arch_timers  tmr,
enum kvm_arch_timer_regs  treg,
u64  val 
)

Definition at line 1219 of file arch_timer.c.

1223 {
1224  struct arch_timer_context *timer;
1225  struct timer_map map;
1226 
1227  get_timer_map(vcpu, &map);
1228  timer = vcpu_get_timer(vcpu, tmr);
1229  if (timer == map.emul_vtimer || timer == map.emul_ptimer) {
1230  soft_timer_cancel(&timer->hrtimer);
1231  kvm_arm_timer_write(vcpu, timer, treg, val);
1232  timer_emulate(timer);
1233  } else {
1234  preempt_disable();
1235  timer_save_state(timer);
1236  kvm_arm_timer_write(vcpu, timer, treg, val);
1237  timer_restore_state(timer);
1238  preempt_enable();
1239  }
1240 }
static void timer_emulate(struct arch_timer_context *ctx)
Definition: arch_timer.c:464
static void soft_timer_cancel(struct hrtimer *hrt)
Definition: arch_timer.c:219
struct hrtimer hrtimer
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_phys_timer_read()

u64 kvm_phys_timer_read ( void  )

Definition at line 173 of file arch_timer.c.

174 {
175  return timecounter->cc->read(timecounter->cc);
176 }
static struct timecounter * timecounter
Definition: arch_timer.c:26
Here is the caller graph for this function:

◆ kvm_timer_cpu_down()

void kvm_timer_cpu_down ( void  )

Definition at line 1041 of file arch_timer.c.

1042 {
1043  disable_percpu_irq(host_vtimer_irq);
1044  if (host_ptimer_irq)
1045  disable_percpu_irq(host_ptimer_irq);
1046 }
static unsigned int host_ptimer_irq
Definition: arch_timer.c:28
static unsigned int host_vtimer_irq
Definition: arch_timer.c:27
Here is the caller graph for this function:

◆ kvm_timer_cpu_up()

void kvm_timer_cpu_up ( void  )

Definition at line 1034 of file arch_timer.c.

1035 {
1036  enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
1037  if (host_ptimer_irq)
1038  enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
1039 }
static u32 host_vtimer_irq_flags
Definition: arch_timer.c:29
static u32 host_ptimer_irq_flags
Definition: arch_timer.c:30
Here is the caller graph for this function:

◆ kvm_timer_enable()

int kvm_timer_enable ( struct kvm_vcpu *  vcpu)

Definition at line 1506 of file arch_timer.c.

1507 {
1508  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1509  struct timer_map map;
1510  int ret;
1511 
1512  if (timer->enabled)
1513  return 0;
1514 
1515  /* Without a VGIC we do not map virtual IRQs to physical IRQs */
1516  if (!irqchip_in_kernel(vcpu->kvm))
1517  goto no_vgic;
1518 
1519  /*
1520  * At this stage, we have the guarantee that the vgic is both
1521  * available and initialized.
1522  */
1523  if (!timer_irqs_are_valid(vcpu)) {
1524  kvm_debug("incorrectly configured timer irqs\n");
1525  return -EINVAL;
1526  }
1527 
1528  get_timer_map(vcpu, &map);
1529 
1530  ret = kvm_vgic_map_phys_irq(vcpu,
1531  map.direct_vtimer->host_timer_irq,
1532  timer_irq(map.direct_vtimer),
1533  &arch_timer_irq_ops);
1534  if (ret)
1535  return ret;
1536 
1537  if (map.direct_ptimer) {
1538  ret = kvm_vgic_map_phys_irq(vcpu,
1539  map.direct_ptimer->host_timer_irq,
1540  timer_irq(map.direct_ptimer),
1541  &arch_timer_irq_ops);
1542  }
1543 
1544  if (ret)
1545  return ret;
1546 
1547 no_vgic:
1548  timer->enabled = 1;
1549  return 0;
1550 }
static struct irq_ops arch_timer_irq_ops
Definition: arch_timer.c:54
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:1452
#define vcpu_timer(v)
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops)
Definition: vgic.c:514
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_hyp_init()

int __init kvm_timer_hyp_init ( bool  has_gic)

Definition at line 1367 of file arch_timer.c.

1368 {
1369  struct arch_timer_kvm_info *info;
1370  int err;
1371 
1372  info = arch_timer_get_kvm_info();
1373  timecounter = &info->timecounter;
1374 
1375  if (!timecounter->cc) {
1376  kvm_err("kvm_arch_timer: uninitialized timecounter\n");
1377  return -ENODEV;
1378  }
1379 
1380  err = kvm_irq_init(info);
1381  if (err)
1382  return err;
1383 
1384  /* First, do the virtual EL1 timer irq */
1385 
1386  err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
1387  "kvm guest vtimer", kvm_get_running_vcpus());
1388  if (err) {
1389  kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
1390  host_vtimer_irq, err);
1391  return err;
1392  }
1393 
1394  if (has_gic) {
1395  err = irq_set_vcpu_affinity(host_vtimer_irq,
1396  kvm_get_running_vcpus());
1397  if (err) {
1398  kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
1399  goto out_free_vtimer_irq;
1400  }
1401 
1402  static_branch_enable(&has_gic_active_state);
1403  }
1404 
1405  kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
1406 
1407  /* Now let's do the physical EL1 timer irq */
1408 
1409  if (info->physical_irq > 0) {
1410  err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
1411  "kvm guest ptimer", kvm_get_running_vcpus());
1412  if (err) {
1413  kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
1414  host_ptimer_irq, err);
1415  goto out_free_vtimer_irq;
1416  }
1417 
1418  if (has_gic) {
1419  err = irq_set_vcpu_affinity(host_ptimer_irq,
1420  kvm_get_running_vcpus());
1421  if (err) {
1422  kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
1423  goto out_free_ptimer_irq;
1424  }
1425  }
1426 
1427  kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
1428  } else if (has_vhe()) {
1429  kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
1430  info->physical_irq);
1431  err = -ENODEV;
1432  goto out_free_vtimer_irq;
1433  }
1434 
1435  return 0;
1436 
1437 out_free_ptimer_irq:
1438  if (info->physical_irq > 0)
1439  free_percpu_irq(host_ptimer_irq, kvm_get_running_vcpus());
1440 out_free_vtimer_irq:
1441  free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
1442  return err;
1443 }
static int kvm_irq_init(struct arch_timer_kvm_info *info)
Definition: arch_timer.c:1319
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
Definition: arch_timer.c:224
struct kvm_vcpu *__percpu * kvm_get_running_vcpus(void)
Definition: kvm_main.c:6353
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_init_vhe()

void kvm_timer_init_vhe ( void  )

Definition at line 1553 of file arch_timer.c.

1554 {
1555  if (cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF))
1556  sysreg_clear_set(cnthctl_el2, 0, CNTHCTL_ECV);
1557 }
Here is the caller graph for this function:

◆ kvm_timer_init_vm()

void kvm_timer_init_vm ( struct kvm *  kvm)

Definition at line 1028 of file arch_timer.c.

1029 {
1030  for (int i = 0; i < NR_KVM_TIMERS; i++)
1031  kvm->arch.timer_data.ppi[i] = default_ppi[i];
1032 }
static const u8 default_ppi[]
Definition: arch_timer.c:34
Here is the caller graph for this function:

◆ kvm_timer_should_notify_user()

bool kvm_timer_should_notify_user ( struct kvm_vcpu *  vcpu)

Definition at line 860 of file arch_timer.c.

861 {
862  struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
863  struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
864  struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
865  bool vlevel, plevel;
866 
867  if (likely(irqchip_in_kernel(vcpu->kvm)))
868  return false;
869 
870  vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
871  plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
872 
873  return kvm_timer_should_fire(vtimer) != vlevel ||
874  kvm_timer_should_fire(ptimer) != plevel;
875 }
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
Definition: arch_timer.c:380
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_sync_user()

void kvm_timer_sync_user ( struct kvm_vcpu *  vcpu)

Definition at line 927 of file arch_timer.c.

928 {
929  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
930 
931  if (unlikely(!timer->enabled))
932  return;
933 
934  if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
935  unmask_vtimer_irq_user(vcpu);
936 }
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:914
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_update_run()

void kvm_timer_update_run ( struct kvm_vcpu *  vcpu)

Definition at line 430 of file arch_timer.c.

431 {
432  struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
433  struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
434  struct kvm_sync_regs *regs = &vcpu->run->s.regs;
435 
436  /* Populate the device bitmap with the timer states */
437  regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
438  KVM_ARM_DEV_EL1_PTIMER);
439  if (kvm_timer_should_fire(vtimer))
440  regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
441  if (kvm_timer_should_fire(ptimer))
442  regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
443 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_vcpu_init()

void kvm_timer_vcpu_init ( struct kvm_vcpu *  vcpu)

Definition at line 1011 of file arch_timer.c.

1012 {
1013  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1014 
1015  for (int i = 0; i < NR_KVM_TIMERS; i++)
1016  timer_context_init(vcpu, i);
1017 
1018  /* Synchronize offsets across timers of a VM if not already provided */
1019  if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) {
1020  timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read());
1021  timer_set_offset(vcpu_ptimer(vcpu), 0);
1022  }
1023 
1024  hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1025  timer->bg_timer.function = kvm_bg_timer_expire;
1026 }
static void timer_context_init(struct kvm_vcpu *vcpu, int timerid)
Definition: arch_timer.c:984
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
Definition: arch_timer.c:330
struct hrtimer bg_timer
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_vcpu_load()

void kvm_timer_vcpu_load ( struct kvm_vcpu *  vcpu)

Definition at line 826 of file arch_timer.c.

827 {
828  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
829  struct timer_map map;
830 
831  if (unlikely(!timer->enabled))
832  return;
833 
834  get_timer_map(vcpu, &map);
835 
836  if (static_branch_likely(&has_gic_active_state)) {
837  if (vcpu_has_nv(vcpu))
838  kvm_timer_vcpu_load_nested_switch(vcpu, &map);
839 
840  kvm_timer_vcpu_load_gic(map.direct_vtimer);
841  if (map.direct_ptimer)
842  kvm_timer_vcpu_load_gic(map.direct_ptimer);
843  } else {
844  kvm_timer_vcpu_load_nogic(vcpu);
845  }
846 
847  kvm_timer_unblocking(vcpu);
848 
849  timer_restore_state(map.direct_vtimer);
850  if (map.direct_ptimer)
851  timer_restore_state(map.direct_ptimer);
852  if (map.emul_vtimer)
853  timer_emulate(map.emul_vtimer);
854  if (map.emul_ptimer)
855  timer_emulate(map.emul_ptimer);
856 
857  timer_set_traps(vcpu, &map);
858 }
static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
Definition: arch_timer.c:655
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:676
static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu, struct timer_map *map)
Definition: arch_timer.c:713
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:596
static void timer_set_traps(struct kvm_vcpu *vcpu, struct timer_map *map)
Definition: arch_timer.c:764
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_vcpu_put()

void kvm_timer_vcpu_put ( struct kvm_vcpu *  vcpu)

Definition at line 877 of file arch_timer.c.

878 {
879  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
880  struct timer_map map;
881 
882  if (unlikely(!timer->enabled))
883  return;
884 
885  get_timer_map(vcpu, &map);
886 
887  timer_save_state(map.direct_vtimer);
888  if (map.direct_ptimer)
889  timer_save_state(map.direct_ptimer);
890 
891  /*
892  * Cancel soft timer emulation, because the only case where we
893  * need it after a vcpu_put is in the context of a sleeping VCPU, and
894  * in that case we already factor in the deadline for the physical
895  * timer when scheduling the bg_timer.
896  *
897  * In any case, we re-schedule the hrtimer for the physical timer when
898  * coming back to the VCPU thread in kvm_timer_vcpu_load().
899  */
900  if (map.emul_vtimer)
901  soft_timer_cancel(&map.emul_vtimer->hrtimer);
902  if (map.emul_ptimer)
903  soft_timer_cancel(&map.emul_ptimer->hrtimer);
904 
905  if (kvm_vcpu_is_blocking(vcpu))
906  kvm_timer_blocking(vcpu);
907 }
static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:571
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_vcpu_reset()

void kvm_timer_vcpu_reset ( struct kvm_vcpu *  vcpu)

Definition at line 938 of file arch_timer.c.

939 {
940  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
941  struct timer_map map;
942 
943  get_timer_map(vcpu, &map);
944 
945  /*
946  * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
947  * and to 0 for ARMv7. We provide an implementation that always
948  * resets the timer to be disabled and unmasked and is compliant with
949  * the ARMv7 architecture.
950  */
951  for (int i = 0; i < nr_timers(vcpu); i++)
952  timer_set_ctl(vcpu_get_timer(vcpu, i), 0);
953 
954  /*
955  * A vcpu running at EL2 is in charge of the offset applied to
956  * the virtual timer, so use the physical VM offset, and point
957  * the vcpu offset to CNTVOFF_EL2.
958  */
959  if (vcpu_has_nv(vcpu)) {
960  struct arch_timer_offset *offs = &vcpu_vtimer(vcpu)->offset;
961 
962  offs->vcpu_offset = &__vcpu_sys_reg(vcpu, CNTVOFF_EL2);
963  offs->vm_offset = &vcpu->kvm->arch.timer_data.poffset;
964  }
965 
966  if (timer->enabled) {
967  for (int i = 0; i < nr_timers(vcpu); i++)
968  kvm_timer_update_irq(vcpu, false,
969  vcpu_get_timer(vcpu, i));
970 
971  if (irqchip_in_kernel(vcpu->kvm)) {
972  kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_vtimer));
973  if (map.direct_ptimer)
974  kvm_vgic_reset_mapped_irq(vcpu, timer_irq(map.direct_ptimer));
975  }
976  }
977 
978  if (map.emul_vtimer)
979  soft_timer_cancel(&map.emul_vtimer->hrtimer);
980  if (map.emul_ptimer)
981  soft_timer_cancel(&map.emul_ptimer->hrtimer);
982 }
static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
Definition: arch_timer.c:119
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, struct arch_timer_context *timer_ctx)
Definition: arch_timer.c:445
static int nr_timers(struct kvm_vcpu *vcpu)
Definition: arch_timer.c:58
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
Definition: vgic.c:540
Here is the call graph for this function:
Here is the caller graph for this function:

◆ kvm_timer_vcpu_terminate()

void kvm_timer_vcpu_terminate ( struct kvm_vcpu *  vcpu)

Definition at line 1445 of file arch_timer.c.

1446 {
1447  struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1448 
1449  soft_timer_cancel(&timer->bg_timer);
1450 }
Here is the call graph for this function:
Here is the caller graph for this function:

◆ timer_get_ctl()

u32 timer_get_ctl ( struct arch_timer_context ctxt)

Definition at line 66 of file arch_timer.c.

67 {
68  struct kvm_vcpu *vcpu = ctxt->vcpu;
69 
70  switch(arch_timer_ctx_index(ctxt)) {
71  case TIMER_VTIMER:
72  return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
73  case TIMER_PTIMER:
74  return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
75  case TIMER_HVTIMER:
76  return __vcpu_sys_reg(vcpu, CNTHV_CTL_EL2);
77  case TIMER_HPTIMER:
78  return __vcpu_sys_reg(vcpu, CNTHP_CTL_EL2);
79  default:
80  WARN_ON(1);
81  return 0;
82  }
83 }
#define arch_timer_ctx_index(ctx)
Here is the caller graph for this function:

◆ timer_get_cval()

u64 timer_get_cval ( struct arch_timer_context ctxt)

Definition at line 85 of file arch_timer.c.

86 {
87  struct kvm_vcpu *vcpu = ctxt->vcpu;
88 
89  switch(arch_timer_ctx_index(ctxt)) {
90  case TIMER_VTIMER:
91  return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
92  case TIMER_PTIMER:
93  return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
94  case TIMER_HVTIMER:
95  return __vcpu_sys_reg(vcpu, CNTHV_CVAL_EL2);
96  case TIMER_HPTIMER:
97  return __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
98  default:
99  WARN_ON(1);
100  return 0;
101  }
102 }
Here is the caller graph for this function: