KVM
xen.c File Reference
#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "lapic.h"
#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>
#include <asm/xen/cpuid.h>
#include "cpuid.h"
#include "trace.h"

Go to the source code of this file.

Classes

struct  compat_vcpu_set_singleshot_timer
 
struct  evtchnfd
 

Macros

#define pr_fmt(fmt)   KBUILD_MODNAME ": " fmt
 

Functions

static int kvm_xen_set_evtchn (struct kvm_xen_evtchn *xe, struct kvm *kvm)
 
static int kvm_xen_setattr_evtchn (struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
static bool kvm_xen_hcall_evtchn_send (struct kvm_vcpu *vcpu, u64 param, u64 *r)
 
 DEFINE_STATIC_KEY_DEFERRED_FALSE (kvm_xen_enabled, HZ)
 
static int kvm_xen_shared_info_init (struct kvm *kvm, gfn_t gfn)
 
void kvm_xen_inject_timer_irqs (struct kvm_vcpu *vcpu)
 
static enum hrtimer_restart xen_timer_callback (struct hrtimer *timer)
 
static void kvm_xen_start_timer (struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
 
static void kvm_xen_stop_timer (struct kvm_vcpu *vcpu)
 
static void kvm_xen_init_timer (struct kvm_vcpu *vcpu)
 
static void kvm_xen_update_runstate_guest (struct kvm_vcpu *v, bool atomic)
 
void kvm_xen_update_runstate (struct kvm_vcpu *v, int state)
 
void kvm_xen_inject_vcpu_vector (struct kvm_vcpu *v)
 
void kvm_xen_inject_pending_events (struct kvm_vcpu *v)
 
int __kvm_xen_has_interrupt (struct kvm_vcpu *v)
 
int kvm_xen_hvm_set_attr (struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
int kvm_xen_hvm_get_attr (struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
int kvm_xen_vcpu_set_attr (struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
int kvm_xen_vcpu_get_attr (struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 
int kvm_xen_write_hypercall_page (struct kvm_vcpu *vcpu, u64 data)
 
int kvm_xen_hvm_config (struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 
static int kvm_xen_hypercall_set_result (struct kvm_vcpu *vcpu, u64 result)
 
static int kvm_xen_hypercall_complete_userspace (struct kvm_vcpu *vcpu)
 
static int max_evtchn_port (struct kvm *kvm)
 
static bool wait_pending_event (struct kvm_vcpu *vcpu, int nr_ports, evtchn_port_t *ports)
 
static bool kvm_xen_schedop_poll (struct kvm_vcpu *vcpu, bool longmode, u64 param, u64 *r)
 
static void cancel_evtchn_poll (struct timer_list *t)
 
static bool kvm_xen_hcall_sched_op (struct kvm_vcpu *vcpu, bool longmode, int cmd, u64 param, u64 *r)
 
struct compat_vcpu_set_singleshot_timer __attribute__ ((packed))
 
static bool kvm_xen_hcall_vcpu_op (struct kvm_vcpu *vcpu, bool longmode, int cmd, int vcpu_id, u64 param, u64 *r)
 
static bool kvm_xen_hcall_set_timer_op (struct kvm_vcpu *vcpu, uint64_t timeout, u64 *r)
 
int kvm_xen_hypercall (struct kvm_vcpu *vcpu)
 
static void kvm_xen_check_poller (struct kvm_vcpu *vcpu, int port)
 
int kvm_xen_set_evtchn_fast (struct kvm_xen_evtchn *xe, struct kvm *kvm)
 
static int evtchn_set_fn (struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status)
 
int kvm_xen_setup_evtchn (struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue)
 
int kvm_xen_hvm_evtchn_send (struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
 
static int kvm_xen_eventfd_update (struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
static int kvm_xen_eventfd_assign (struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 
static int kvm_xen_eventfd_deassign (struct kvm *kvm, u32 port)
 
static int kvm_xen_eventfd_reset (struct kvm *kvm)
 
void kvm_xen_init_vcpu (struct kvm_vcpu *vcpu)
 
void kvm_xen_destroy_vcpu (struct kvm_vcpu *vcpu)
 
void kvm_xen_update_tsc_info (struct kvm_vcpu *vcpu)
 
void kvm_xen_init_vm (struct kvm *kvm)
 
void kvm_xen_destroy_vm (struct kvm *kvm)
 

Variables

uint64_t timeout_abs_ns
 
uint32_t flags
 
struct evtchnfd __attribute__
 

Macro Definition Documentation

◆ pr_fmt

#define pr_fmt (   fmt)    KBUILD_MODNAME ": " fmt

Definition at line 8 of file xen.c.

Function Documentation

◆ __attribute__()

struct compat_vcpu_set_singleshot_timer __attribute__ ( (packed)  )

◆ __kvm_xen_has_interrupt()

int __kvm_xen_has_interrupt ( struct kvm_vcpu *  v)

Definition at line 577 of file xen.c.

578 {
579  struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
580  unsigned long flags;
581  u8 rc = 0;
582 
583  /*
584  * If the global upcall vector (HVMIRQ_callback_vector) is set and
585  * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
586  */
587 
588  /* No need for compat handling here */
589  BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
590  offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
591  BUILD_BUG_ON(sizeof(rc) !=
592  sizeof_field(struct vcpu_info, evtchn_upcall_pending));
593  BUILD_BUG_ON(sizeof(rc) !=
594  sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
595 
596  read_lock_irqsave(&gpc->lock, flags);
597  while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
598  read_unlock_irqrestore(&gpc->lock, flags);
599 
600  /*
601  * This function gets called from kvm_vcpu_block() after setting the
602  * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
603  * from a HLT. So we really mustn't sleep. If the page ended up absent
604  * at that point, just return 1 in order to trigger an immediate wake,
605  * and we'll end up getting called again from a context where we *can*
606  * fault in the page and wait for it.
607  */
608  if (in_atomic() || !task_is_running(current))
609  return 1;
610 
611  if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
612  /*
613  * If this failed, userspace has screwed up the
614  * vcpu_info mapping. No interrupts for you.
615  */
616  return 0;
617  }
618  read_lock_irqsave(&gpc->lock, flags);
619  }
620 
621  rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
622  read_unlock_irqrestore(&gpc->lock, flags);
623  return rc;
624 }
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len)
Definition: pfncache.c:79
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
Definition: pfncache.c:334
uint32_t flags
Definition: xen.c:1

◆ cancel_evtchn_poll()

static void cancel_evtchn_poll ( struct timer_list *  t)
static

Definition at line 1362 of file xen.c.

1363 {
1364  struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);
1365 
1366  kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1367  kvm_vcpu_kick(vcpu);
1368 }
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3931

◆ DEFINE_STATIC_KEY_DEFERRED_FALSE()

DEFINE_STATIC_KEY_DEFERRED_FALSE ( kvm_xen_enabled  ,
HZ   
)

◆ evtchn_set_fn()

static int evtchn_set_fn ( struct kvm_kernel_irq_routing_entry *  e,
struct kvm *  kvm,
int  irq_source_id,
int  level,
bool  line_status 
)
static

Definition at line 1772 of file xen.c.

1774 {
1775  if (!level)
1776  return -EINVAL;
1777 
1778  return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
1779 }
static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
Definition: xen.c:1713

◆ kvm_xen_check_poller()

static void kvm_xen_check_poller ( struct kvm_vcpu *  vcpu,
int  port 
)
static

Definition at line 1583 of file xen.c.

1584 {
1585  int poll_evtchn = vcpu->arch.xen.poll_evtchn;
1586 
1587  if ((poll_evtchn == port || poll_evtchn == -1) &&
1588  test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
1589  kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1590  kvm_vcpu_kick(vcpu);
1591  }
1592 }

◆ kvm_xen_destroy_vcpu()

void kvm_xen_destroy_vcpu ( struct kvm_vcpu *  vcpu)

Definition at line 2122 of file xen.c.

2123 {
2124  if (kvm_xen_timer_enabled(vcpu))
2125  kvm_xen_stop_timer(vcpu);
2126 
2127  kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
2128  kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
2129  kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
2130  kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
2131 
2132  del_timer_sync(&vcpu->arch.xen.poll_timer);
2133 }
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
Definition: pfncache.c:382
static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
Definition: xen.c:184
static bool kvm_xen_timer_enabled(struct kvm_vcpu *vcpu)
Definition: xen.h:155

◆ kvm_xen_destroy_vm()

void kvm_xen_destroy_vm ( struct kvm *  kvm)

Definition at line 2165 of file xen.c.

2166 {
2167  struct evtchnfd *evtchnfd;
2168  int i;
2169 
2170  kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
2171 
2172  idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
2173  if (!evtchnfd->deliver.port.port)
2174  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
2175  kfree(evtchnfd);
2176  }
2177  idr_destroy(&kvm->arch.xen.evtchn_ports);
2178 
2179  if (kvm->arch.xen_hvm_config.msr)
2180  static_branch_slow_dec_deferred(&kvm_xen_enabled);
2181 }
struct evtchnfd
Definition: xen.c:1856
struct evtchnfd::(anonymous)::(anonymous) eventfd
struct kvm_xen_evtchn port
Definition: xen.c:1860
union evtchnfd::(anonymous) deliver

◆ kvm_xen_eventfd_assign()

static int kvm_xen_eventfd_assign ( struct kvm *  kvm,
struct kvm_xen_hvm_attr *  data 
)
static

Definition at line 1918 of file xen.c.

1920 {
1921  u32 port = data->u.evtchn.send_port;
1922  struct eventfd_ctx *eventfd = NULL;
1923  struct evtchnfd *evtchnfd;
1924  int ret = -EINVAL;
1925 
1926  evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
1927  if (!evtchnfd)
1928  return -ENOMEM;
1929 
1930  switch(data->u.evtchn.type) {
1931  case EVTCHNSTAT_ipi:
1932  /* IPI must map back to the same port# */
1933  if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
1934  goto out_noeventfd; /* -EINVAL */
1935  break;
1936 
1937  case EVTCHNSTAT_interdomain:
1938  if (data->u.evtchn.deliver.port.port) {
1939  if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
1940  goto out_noeventfd; /* -EINVAL */
1941  } else {
1942  eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
1943  if (IS_ERR(eventfd)) {
1944  ret = PTR_ERR(eventfd);
1945  goto out_noeventfd;
1946  }
1947  }
1948  break;
1949 
1950  case EVTCHNSTAT_virq:
1951  case EVTCHNSTAT_closed:
1952  case EVTCHNSTAT_unbound:
1953  case EVTCHNSTAT_pirq:
1954  default: /* Unknown event channel type */
1955  goto out; /* -EINVAL */
1956  }
1957 
1958  evtchnfd->send_port = data->u.evtchn.send_port;
1959  evtchnfd->type = data->u.evtchn.type;
1960  if (eventfd) {
1962  } else {
1963  /* We only support 2 level event channels for now */
1964  if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
1965  goto out; /* -EINVAL; */
1966 
1967  evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
1968  evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
1969  evtchnfd->deliver.port.vcpu_idx = -1;
1970  evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
1971  }
1972 
1973  mutex_lock(&kvm->arch.xen.xen_lock);
1974  ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
1975  GFP_KERNEL);
1976  mutex_unlock(&kvm->arch.xen.xen_lock);
1977  if (ret >= 0)
1978  return 0;
1979 
1980  if (ret == -ENOSPC)
1981  ret = -EEXIST;
1982 out:
1983  if (eventfd)
1984  eventfd_ctx_put(eventfd);
1985 out_noeventfd:
1986  kfree(evtchnfd);
1987  return ret;
1988 }
u32 send_port
Definition: xen.c:1857
u32 type
Definition: xen.c:1858
static int max_evtchn_port(struct kvm *kvm)
Definition: xen.c:1215
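
Userspace reaches this path via KVM_XEN_HVM_SET_ATTR with KVM_XEN_ATTR_TYPE_EVTCHN. A hedged sketch of the interdomain-to-eventfd case handled above, with placeholder port and eventfd values (error handling omitted):

    int efd = eventfd(0, EFD_CLOEXEC);          /* VMM-side notification fd */
    struct kvm_xen_hvm_attr attr = {
            .type = KVM_XEN_ATTR_TYPE_EVTCHN,
            .u.evtchn = {
                    .send_port          = 5,    /* placeholder guest port */
                    .type               = EVTCHNSTAT_interdomain,
                    .flags              = 0,    /* plain assign, not UPDATE/RESET */
                    .deliver.eventfd.fd = efd,  /* taken by eventfd_ctx_fdget() above */
            },
    };
    ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);

With .deliver.port.port left at zero, a guest EVTCHNOP_send on that port signals the eventfd instead of being delivered back into the guest.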

◆ kvm_xen_eventfd_deassign()

static int kvm_xen_eventfd_deassign ( struct kvm *  kvm,
u32  port 
)
static

Definition at line 1990 of file xen.c.

1991 {
1992  struct evtchnfd *evtchnfd;
1993 
1994  mutex_lock(&kvm->arch.xen.xen_lock);
1995  evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
1996  mutex_unlock(&kvm->arch.xen.xen_lock);
1997 
1998  if (!evtchnfd)
1999  return -ENOENT;
2000 
2001  synchronize_srcu(&kvm->srcu);
2002  if (!evtchnfd->deliver.port.port)
2003  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
2004  kfree(evtchnfd);
2005  return 0;
2006 }

◆ kvm_xen_eventfd_reset()

static int kvm_xen_eventfd_reset ( struct kvm *  kvm)
static

Definition at line 2008 of file xen.c.

2009 {
2010  struct evtchnfd *evtchnfd, **all_evtchnfds;
2011  int i;
2012  int n = 0;
2013 
2014  mutex_lock(&kvm->arch.xen.xen_lock);
2015 
2016  /*
2017  * Because synchronize_srcu() cannot be called inside the
2018  * critical section, first collect all the evtchnfd objects
2019  * in an array as they are removed from evtchn_ports.
2020  */
2021  idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
2022  n++;
2023 
2024  all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
2025  if (!all_evtchnfds) {
2026  mutex_unlock(&kvm->arch.xen.xen_lock);
2027  return -ENOMEM;
2028  }
2029 
2030  n = 0;
2031  idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
2032  all_evtchnfds[n++] = evtchnfd;
2033  idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
2034  }
2035  mutex_unlock(&kvm->arch.xen.xen_lock);
2036 
2037  synchronize_srcu(&kvm->srcu);
2038 
2039  while (n--) {
2040  evtchnfd = all_evtchnfds[n];
2041  if (!evtchnfd->deliver.port.port)
2042  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
2043  kfree(evtchnfd);
2044  }
2045  kfree(all_evtchnfds);
2046 
2047  return 0;
2048 }

◆ kvm_xen_eventfd_update()

static int kvm_xen_eventfd_update ( struct kvm *  kvm,
struct kvm_xen_hvm_attr *  data 
)
static

Definition at line 1871 of file xen.c.

1873 {
1874  u32 port = data->u.evtchn.send_port;
1875  struct evtchnfd *evtchnfd;
1876  int ret;
1877 
1878  /* Protect writes to evtchnfd as well as the idr lookup. */
1879  mutex_lock(&kvm->arch.xen.xen_lock);
1880  evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
1881 
1882  ret = -ENOENT;
1883  if (!evtchnfd)
1884  goto out_unlock;
1885 
1886  /* For an UPDATE, nothing may change except the priority/vcpu */
1887  ret = -EINVAL;
1888  if (evtchnfd->type != data->u.evtchn.type)
1889  goto out_unlock;
1890 
1891  /*
1892  * Port cannot change, and if it's zero that was an eventfd
1893  * which can't be changed either.
1894  */
1895  if (!evtchnfd->deliver.port.port ||
1896  evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
1897  goto out_unlock;
1898 
1899  /* We only support 2 level event channels for now */
1900  if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
1901  goto out_unlock;
1902 
1903  evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
1904  if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
1905  evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
1906  evtchnfd->deliver.port.vcpu_idx = -1;
1907  }
1908  ret = 0;
1909 out_unlock:
1910  mutex_unlock(&kvm->arch.xen.xen_lock);
1911  return ret;
1912 }

◆ kvm_xen_hcall_evtchn_send()

static bool kvm_xen_hcall_evtchn_send ( struct kvm_vcpu *  vcpu,
u64  param,
u64 *  r 
)
static

Definition at line 2070 of file xen.c.

2071 {
2072  struct evtchnfd *evtchnfd;
2073  struct evtchn_send send;
2074  struct x86_exception e;
2075 
2076  /* Sanity check: this structure is the same for 32-bit and 64-bit */
2077  BUILD_BUG_ON(sizeof(send) != 4);
2078  if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
2079  *r = -EFAULT;
2080  return true;
2081  }
2082 
2083  /*
2084  * evtchnfd is protected by kvm->srcu; the idr lookup instead
2085  * is protected by RCU.
2086  */
2087  rcu_read_lock();
2088  evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
2089  rcu_read_unlock();
2090  if (!evtchnfd)
2091  return false;
2092 
2093  if (evtchnfd->deliver.port.port) {
2094  int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
2095  if (ret < 0 && ret != -ENOTCONN)
2096  return false;
2097  } else {
2098  eventfd_signal(evtchnfd->deliver.eventfd.ctx);
2099  }
2100 
2101  *r = 0;
2102  return true;
2103 }
int kvm_read_guest_virt(struct kvm_vcpu *vcpu, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception)
Definition: x86.c:7572

◆ kvm_xen_hcall_sched_op()

static bool kvm_xen_hcall_sched_op ( struct kvm_vcpu *  vcpu,
bool  longmode,
int  cmd,
u64  param,
u64 *  r 
)
static

Definition at line 1370 of file xen.c.

1372 {
1373  switch (cmd) {
1374  case SCHEDOP_poll:
1375  if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
1376  return true;
1377  fallthrough;
1378  case SCHEDOP_yield:
1379  kvm_vcpu_on_spin(vcpu, true);
1380  *r = 0;
1381  return true;
1382  default:
1383  break;
1384  }
1385 
1386  return false;
1387 }
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
Definition: kvm_main.c:4056
static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode, u64 param, u64 *r)
Definition: xen.c:1261

◆ kvm_xen_hcall_set_timer_op()

static bool kvm_xen_hcall_set_timer_op ( struct kvm_vcpu *  vcpu,
uint64_t  timeout,
u64 *  r 
)
static

Definition at line 1452 of file xen.c.

1454 {
1455  if (!kvm_xen_timer_enabled(vcpu))
1456  return false;
1457 
1458  if (timeout) {
1459  uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
1460  int64_t delta = timeout - guest_now;
1461 
1462  /* Xen has a 'Linux workaround' in do_set_timer_op() which
1463  * checks for negative absolute timeout values (caused by
1464  * integer overflow), and for values about 13 days in the
1465  * future (2^50ns) which would be caused by jiffies
1466  * overflow. For those cases, it sets the timeout 100ms in
1467  * the future (not *too* soon, since if a guest really did
1468  * set a long timeout on purpose we don't want to keep
1469  * churning CPU time by waking it up).
1470  */
1471  if (unlikely((int64_t)timeout < 0 ||
1472  (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
1473  delta = 100 * NSEC_PER_MSEC;
1474  timeout = guest_now + delta;
1475  }
1476 
1477  kvm_xen_start_timer(vcpu, timeout, delta);
1478  } else {
1479  kvm_xen_stop_timer(vcpu);
1480  }
1481 
1482  *r = 0;
1483  return true;
1484 }
u64 get_kvmclock_ns(struct kvm *kvm)
Definition: x86.c:3105
static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
Definition: xen.c:161
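
To put the 2^50 ns bound from the comment in numbers: 2^50 ns ≈ 1.126 × 10^15 ns ≈ 1.126 × 10^6 s ≈ 13.03 days, hence "about 13 days in the future". Timeouts that are negative when viewed as signed, or whose delta has bits set at or above bit 50, are treated as bogus and the timer is rearmed 100 ms (100 * NSEC_PER_MSEC) from the current kvmclock time instead.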

◆ kvm_xen_hcall_vcpu_op()

static bool kvm_xen_hcall_vcpu_op ( struct kvm_vcpu *  vcpu,
bool  longmode,
int  cmd,
int  vcpu_id,
u64  param,
u64 *  r 
)
static

Definition at line 1394 of file xen.c.

1396 {
1397  struct vcpu_set_singleshot_timer oneshot;
1398  struct x86_exception e;
1399  s64 delta;
1400 
1401  if (!kvm_xen_timer_enabled(vcpu))
1402  return false;
1403 
1404  switch (cmd) {
1405  case VCPUOP_set_singleshot_timer:
1406  if (vcpu->arch.xen.vcpu_id != vcpu_id) {
1407  *r = -EINVAL;
1408  return true;
1409  }
1410 
1411  /*
1412  * The only difference for 32-bit compat is the 4 bytes of
1413  * padding after the interesting part of the structure. So
1414  * for a faithful emulation of Xen we have to *try* to copy
1415  * the padding and return -EFAULT if we can't. Otherwise we
1416  * might as well just have copied the 12-byte 32-bit struct.
1417  */
1418  BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
1419  offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
1420  BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
1421  sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
1422  BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
1423  offsetof(struct vcpu_set_singleshot_timer, flags));
1424  BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
1425  sizeof_field(struct vcpu_set_singleshot_timer, flags));
1426 
1427  if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
1428  sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
1429  *r = -EFAULT;
1430  return true;
1431  }
1432 
1433  /* A delta <= 0 results in an immediate callback, which is what we want */
1434  delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
1435  kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
1436  *r = 0;
1437  return true;
1438 
1439  case VCPUOP_stop_singleshot_timer:
1440  if (vcpu->arch.xen.vcpu_id != vcpu_id) {
1441  *r = -EINVAL;
1442  return true;
1443  }
1444  kvm_xen_stop_timer(vcpu);
1445  *r = 0;
1446  return true;
1447  }
1448 
1449  return false;
1450 }
uint64_t timeout_abs_ns
Definition: xen.c:0

◆ kvm_xen_hvm_config()

int kvm_xen_hvm_config ( struct kvm *  kvm,
struct kvm_xen_hvm_config *  xhc 
)

Definition at line 1161 of file xen.c.

1162 {
1163  /* Only some feature flags need to be *enabled* by userspace */
1164  u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
1165  KVM_XEN_HVM_CONFIG_EVTCHN_SEND |
1166  KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE;
1167  u32 old_flags;
1168 
1169  if (xhc->flags & ~permitted_flags)
1170  return -EINVAL;
1171 
1172  /*
1173  * With hypercall interception the kernel generates its own
1174  * hypercall page so it must not be provided.
1175  */
1176  if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
1177  (xhc->blob_addr_32 || xhc->blob_addr_64 ||
1178  xhc->blob_size_32 || xhc->blob_size_64))
1179  return -EINVAL;
1180 
1181  mutex_lock(&kvm->arch.xen.xen_lock);
1182 
1183  if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
1184  static_branch_inc(&kvm_xen_enabled.key);
1185  else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
1186  static_branch_slow_dec_deferred(&kvm_xen_enabled);
1187 
1188  old_flags = kvm->arch.xen_hvm_config.flags;
1189  memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
1190 
1191  mutex_unlock(&kvm->arch.xen.xen_lock);
1192 
1193  if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE)
1194  kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
1195 
1196  return 0;
1197 }
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
Definition: kvm_main.c:340
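
This is the backend of the KVM_XEN_HVM_CONFIG ioctl on the VM fd. A minimal userspace sketch, assuming hypercall interception is wanted (the MSR index shown is only an illustrative value, not something this function mandates):

    struct kvm_xen_hvm_config xhc = {
            /* Let KVM build the hypercall page itself, so no blob is passed;
             * combining a blob with INTERCEPT_HCALL is rejected above. */
            .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
                     KVM_XEN_HVM_CONFIG_EVTCHN_SEND,
            .msr   = 0x40000200,        /* illustrative hypercall-page MSR index */
    };
    ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);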

◆ kvm_xen_hvm_evtchn_send()

int kvm_xen_hvm_evtchn_send ( struct kvm *  kvm,
struct kvm_irq_routing_xen_evtchn *  uxe 
)

Definition at line 1824 of file xen.c.

1825 {
1826  struct kvm_xen_evtchn e;
1827  int ret;
1828 
1829  if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
1830  return -EINVAL;
1831 
1832  /* We only support 2 level event channels for now */
1833  if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
1834  return -EINVAL;
1835 
1836  e.port = uxe->port;
1837  e.vcpu_id = uxe->vcpu;
1838  e.vcpu_idx = -1;
1839  e.priority = uxe->priority;
1840 
1841  ret = kvm_xen_set_evtchn(&e, kvm);
1842 
1843  /*
1844  * None of that 'return 1 if it actually got delivered' nonsense.
1845  * We don't care if it was masked (-ENOTCONN) either.
1846  */
1847  if (ret > 0 || ret == -ENOTCONN)
1848  ret = 0;
1849 
1850  return ret;
1851 }
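
This function implements the KVM_XEN_HVM_EVTCHN_SEND ioctl. A hedged userspace sketch (port and vcpu values are placeholders):

    struct kvm_irq_routing_xen_evtchn uxe = {
            .port     = 3,          /* placeholder event channel port */
            .vcpu     = 0,          /* Xen vCPU id of the target */
            .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
    };
    /* Returns 0 even when the port is masked; -ENOTCONN is swallowed above. */
    ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &uxe);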

◆ kvm_xen_hvm_get_attr()

int kvm_xen_hvm_get_attr ( struct kvm *  kvm,
struct kvm_xen_hvm_attr *  data 
)

Definition at line 689 of file xen.c.

690 {
691  int r = -ENOENT;
692 
693  mutex_lock(&kvm->arch.xen.xen_lock);
694 
695  switch (data->type) {
696  case KVM_XEN_ATTR_TYPE_LONG_MODE:
697  data->u.long_mode = kvm->arch.xen.long_mode;
698  r = 0;
699  break;
700 
701  case KVM_XEN_ATTR_TYPE_SHARED_INFO:
702  if (kvm->arch.xen.shinfo_cache.active)
703  data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
704  else
705  data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
706  r = 0;
707  break;
708 
709  case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
710  data->u.vector = kvm->arch.xen.upcall_vector;
711  r = 0;
712  break;
713 
714  case KVM_XEN_ATTR_TYPE_XEN_VERSION:
715  data->u.xen_version = kvm->arch.xen.xen_version;
716  r = 0;
717  break;
718 
719  case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
720  if (!sched_info_on()) {
721  r = -EOPNOTSUPP;
722  break;
723  }
724  data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
725  r = 0;
726  break;
727 
728  default:
729  break;
730  }
731 
732  mutex_unlock(&kvm->arch.xen.xen_lock);
733  return r;
734 }

◆ kvm_xen_hvm_set_attr()

int kvm_xen_hvm_set_attr ( struct kvm *  kvm,
struct kvm_xen_hvm_attr *  data 
)

Definition at line 626 of file xen.c.

627 {
628  int r = -ENOENT;
629 
630 
631  switch (data->type) {
632  case KVM_XEN_ATTR_TYPE_LONG_MODE:
633  if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
634  r = -EINVAL;
635  } else {
636  mutex_lock(&kvm->arch.xen.xen_lock);
637  kvm->arch.xen.long_mode = !!data->u.long_mode;
638  mutex_unlock(&kvm->arch.xen.xen_lock);
639  r = 0;
640  }
641  break;
642 
643  case KVM_XEN_ATTR_TYPE_SHARED_INFO:
644  mutex_lock(&kvm->arch.xen.xen_lock);
645  r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
646  mutex_unlock(&kvm->arch.xen.xen_lock);
647  break;
648 
649  case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
650  if (data->u.vector && data->u.vector < 0x10)
651  r = -EINVAL;
652  else {
653  mutex_lock(&kvm->arch.xen.xen_lock);
654  kvm->arch.xen.upcall_vector = data->u.vector;
655  mutex_unlock(&kvm->arch.xen.xen_lock);
656  r = 0;
657  }
658  break;
659 
660  case KVM_XEN_ATTR_TYPE_EVTCHN:
661  r = kvm_xen_setattr_evtchn(kvm, data);
662  break;
663 
664  case KVM_XEN_ATTR_TYPE_XEN_VERSION:
665  mutex_lock(&kvm->arch.xen.xen_lock);
666  kvm->arch.xen.xen_version = data->u.xen_version;
667  mutex_unlock(&kvm->arch.xen.xen_lock);
668  r = 0;
669  break;
670 
671  case KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG:
672  if (!sched_info_on()) {
673  r = -EOPNOTSUPP;
674  break;
675  }
676  mutex_lock(&kvm->arch.xen.xen_lock);
677  kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
678  mutex_unlock(&kvm->arch.xen.xen_lock);
679  r = 0;
680  break;
681 
682  default:
683  break;
684  }
685 
686  return r;
687 }
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
Definition: xen.c:2050
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
Definition: xen.c:37
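
As a usage sketch (assuming the standard KVM_XEN_HVM_SET_ATTR ioctl; the GFN is a made-up value), userspace points KVM at the guest's shared_info page like this:

    struct kvm_xen_hvm_attr attr = {
            .type              = KVM_XEN_ATTR_TYPE_SHARED_INFO,
            .u.shared_info.gfn = 0xfeffc,       /* hypothetical guest frame number */
    };
    /* Lands in kvm_xen_shared_info_init() via the SHARED_INFO case above. */
    ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);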

◆ kvm_xen_hypercall()

int kvm_xen_hypercall ( struct kvm_vcpu *  vcpu)

Definition at line 1486 of file xen.c.

1487 {
1488  bool longmode;
1489  u64 input, params[6], r = -ENOSYS;
1490  bool handled = false;
1491  u8 cpl;
1492 
1493  input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
1494 
1495  /* Hyper-V hypercalls get bit 31 set in EAX */
1496  if ((input & 0x80000000) &&
1497  kvm_hv_hypercall_enabled(vcpu))
1498  return kvm_hv_hypercall(vcpu);
1499 
1500  longmode = is_64_bit_hypercall(vcpu);
1501  if (!longmode) {
1502  params[0] = (u32)kvm_rbx_read(vcpu);
1503  params[1] = (u32)kvm_rcx_read(vcpu);
1504  params[2] = (u32)kvm_rdx_read(vcpu);
1505  params[3] = (u32)kvm_rsi_read(vcpu);
1506  params[4] = (u32)kvm_rdi_read(vcpu);
1507  params[5] = (u32)kvm_rbp_read(vcpu);
1508  }
1509 #ifdef CONFIG_X86_64
1510  else {
1511  params[0] = (u64)kvm_rdi_read(vcpu);
1512  params[1] = (u64)kvm_rsi_read(vcpu);
1513  params[2] = (u64)kvm_rdx_read(vcpu);
1514  params[3] = (u64)kvm_r10_read(vcpu);
1515  params[4] = (u64)kvm_r8_read(vcpu);
1516  params[5] = (u64)kvm_r9_read(vcpu);
1517  }
1518 #endif
1519  cpl = static_call(kvm_x86_get_cpl)(vcpu);
1520  trace_kvm_xen_hypercall(cpl, input, params[0], params[1], params[2],
1521  params[3], params[4], params[5]);
1522 
1523  /*
1524  * Only allow hypercall acceleration for CPL0. The rare hypercalls that
1525  * are permitted in guest userspace can be handled by the VMM.
1526  */
1527  if (unlikely(cpl > 0))
1528  goto handle_in_userspace;
1529 
1530  switch (input) {
1531  case __HYPERVISOR_xen_version:
1532  if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
1533  r = vcpu->kvm->arch.xen.xen_version;
1534  handled = true;
1535  }
1536  break;
1537  case __HYPERVISOR_event_channel_op:
1538  if (params[0] == EVTCHNOP_send)
1539  handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
1540  break;
1541  case __HYPERVISOR_sched_op:
1542  handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
1543  params[1], &r);
1544  break;
1545  case __HYPERVISOR_vcpu_op:
1546  handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
1547  params[2], &r);
1548  break;
1549  case __HYPERVISOR_set_timer_op: {
1550  u64 timeout = params[0];
1551  /* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
1552  if (!longmode)
1553  timeout |= params[1] << 32;
1554  handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
1555  break;
1556  }
1557  default:
1558  break;
1559  }
1560 
1561  if (handled)
1562  return kvm_xen_hypercall_set_result(vcpu, r);
1563 
1564 handle_in_userspace:
1565  vcpu->run->exit_reason = KVM_EXIT_XEN;
1566  vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
1567  vcpu->run->xen.u.hcall.longmode = longmode;
1568  vcpu->run->xen.u.hcall.cpl = cpl;
1569  vcpu->run->xen.u.hcall.input = input;
1570  vcpu->run->xen.u.hcall.params[0] = params[0];
1571  vcpu->run->xen.u.hcall.params[1] = params[1];
1572  vcpu->run->xen.u.hcall.params[2] = params[2];
1573  vcpu->run->xen.u.hcall.params[3] = params[3];
1574  vcpu->run->xen.u.hcall.params[4] = params[4];
1575  vcpu->run->xen.u.hcall.params[5] = params[5];
1576  vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
1577  vcpu->arch.complete_userspace_io =
1578  kvm_xen_hypercall_complete_userspace;
1579 
1580  return 0;
1581 }
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
Definition: hyperv.c:2519
static bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
Definition: hyperv.h:280
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
Definition: x86.c:13151
static unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
Definition: x86.h:273
static bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
Definition: x86.h:164
static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
Definition: xen.c:1205
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
Definition: xen.c:2070
static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, int vcpu_id, u64 param, u64 *r)
Definition: xen.c:1394
static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout, u64 *r)
Definition: xen.c:1452
static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
Definition: xen.c:1199
static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode, int cmd, u64 param, u64 *r)
Definition: xen.c:1370
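
When the hypercall is not accelerated in-kernel, the function fills vcpu->run and returns to userspace with KVM_EXIT_XEN; the VMM is expected to store the result before resuming, and kvm_xen_hypercall_complete_userspace() then writes it back into RAX. A hedged VMM-side sketch (run is the mmap'd struct kvm_run of the vCPU):

    if (run->exit_reason == KVM_EXIT_XEN &&
        run->xen.type == KVM_EXIT_XEN_HCALL) {
            /* Emulate the call from run->xen.u.hcall.input / .params[],
             * then hand the return value back to the guest. */
            run->xen.u.hcall.result = -ENOSYS;  /* e.g. unimplemented op */
            ioctl(vcpu_fd, KVM_RUN, 0);         /* resume the vCPU */
    }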

◆ kvm_xen_hypercall_complete_userspace()

static int kvm_xen_hypercall_complete_userspace ( struct kvm_vcpu *  vcpu)
static

Definition at line 1205 of file xen.c.

1206 {
1207  struct kvm_run *run = vcpu->run;
1208 
1209  if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
1210  return 1;
1211 
1212  return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
1213 }
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
Definition: x86.c:13164

◆ kvm_xen_hypercall_set_result()

static int kvm_xen_hypercall_set_result ( struct kvm_vcpu *  vcpu,
u64  result 
)
static

Definition at line 1199 of file xen.c.

1200 {
1201  kvm_rax_write(vcpu, result);
1202  return kvm_skip_emulated_instruction(vcpu);
1203 }
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
Definition: x86.c:8916

◆ kvm_xen_init_timer()

static void kvm_xen_init_timer ( struct kvm_vcpu *  vcpu)
static

Definition at line 191 of file xen.c.

192 {
193  hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
194  HRTIMER_MODE_ABS_HARD);
195  vcpu->arch.xen.timer.function = xen_timer_callback;
196 }
static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
Definition: xen.c:133

◆ kvm_xen_init_vcpu()

void kvm_xen_init_vcpu ( struct kvm_vcpu *  vcpu)

Definition at line 2105 of file xen.c.

2106 {
2107  vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
2108  vcpu->arch.xen.poll_evtchn = 0;
2109 
2110  timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
2111 
2112  kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
2113  KVM_HOST_USES_PFN);
2114  kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
2115  KVM_HOST_USES_PFN);
2116  kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
2117  KVM_HOST_USES_PFN);
2118  kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
2119  KVM_HOST_USES_PFN);
2120 }
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
Definition: pfncache.c:340
static void cancel_evtchn_poll(struct timer_list *t)
Definition: xen.c:1362

◆ kvm_xen_init_vm()

void kvm_xen_init_vm ( struct kvm *  kvm)

Definition at line 2158 of file xen.c.

2159 {
2160  mutex_init(&kvm->arch.xen.xen_lock);
2161  idr_init(&kvm->arch.xen.evtchn_ports);
2162  kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
2163 }

◆ kvm_xen_inject_pending_events()

void kvm_xen_inject_pending_events ( struct kvm_vcpu *  v)

Definition at line 519 of file xen.c.

520 {
521  unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
522  struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
523  unsigned long flags;
524 
525  if (!evtchn_pending_sel)
526  return;
527 
528  /*
529  * Yes, this is an open-coded loop. But that's just what put_user()
530  * does anyway. Page it in and retry the instruction. We're just a
531  * little more honest about it.
532  */
533  read_lock_irqsave(&gpc->lock, flags);
534  while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
535  read_unlock_irqrestore(&gpc->lock, flags);
536 
537  if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
538  return;
539 
540  read_lock_irqsave(&gpc->lock, flags);
541  }
542 
543  /* Now gpc->khva is a valid kernel address for the vcpu_info */
544  if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
545  struct vcpu_info *vi = gpc->khva;
546 
547  asm volatile(LOCK_PREFIX "orq %0, %1\n"
548  "notq %0\n"
549  LOCK_PREFIX "andq %0, %2\n"
550  : "=r" (evtchn_pending_sel),
551  "+m" (vi->evtchn_pending_sel),
552  "+m" (v->arch.xen.evtchn_pending_sel)
553  : "0" (evtchn_pending_sel));
554  WRITE_ONCE(vi->evtchn_upcall_pending, 1);
555  } else {
556  u32 evtchn_pending_sel32 = evtchn_pending_sel;
557  struct compat_vcpu_info *vi = gpc->khva;
558 
559  asm volatile(LOCK_PREFIX "orl %0, %1\n"
560  "notl %0\n"
561  LOCK_PREFIX "andl %0, %2\n"
562  : "=r" (evtchn_pending_sel32),
563  "+m" (vi->evtchn_pending_sel),
564  "+m" (v->arch.xen.evtchn_pending_sel)
565  : "0" (evtchn_pending_sel32));
566  WRITE_ONCE(vi->evtchn_upcall_pending, 1);
567  }
568  read_unlock_irqrestore(&gpc->lock, flags);
569 
570  /* For the per-vCPU lapic vector, deliver it as MSI. */
571  if (v->arch.xen.upcall_vector)
572  kvm_xen_inject_vcpu_vector(v);
573 
574  mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
575 }
void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn)
Definition: kvm_main.c:3635
uint32_t evtchn_pending_sel
Definition: xen.h:203
uint8_t evtchn_upcall_pending
Definition: xen.h:200
void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
Definition: xen.c:496

◆ kvm_xen_inject_timer_irqs()

void kvm_xen_inject_timer_irqs ( struct kvm_vcpu *  vcpu)

Definition at line 116 of file xen.c.

117 {
118  if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
119  struct kvm_xen_evtchn e;
120 
121  e.vcpu_id = vcpu->vcpu_id;
122  e.vcpu_idx = vcpu->vcpu_idx;
123  e.port = vcpu->arch.xen.timer_virq;
124  e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
125 
126  kvm_xen_set_evtchn(&e, vcpu->kvm);
127 
128  vcpu->arch.xen.timer_expires = 0;
129  atomic_set(&vcpu->arch.xen.timer_pending, 0);
130  }
131 }

◆ kvm_xen_inject_vcpu_vector()

void kvm_xen_inject_vcpu_vector ( struct kvm_vcpu *  v)

Definition at line 496 of file xen.c.

497 {
498  struct kvm_lapic_irq irq = { };
499  int r;
500 
501  irq.dest_id = v->vcpu_id;
502  irq.vector = v->arch.xen.upcall_vector;
503  irq.dest_mode = APIC_DEST_PHYSICAL;
504  irq.shorthand = APIC_DEST_NOSHORT;
505  irq.delivery_mode = APIC_DM_FIXED;
506  irq.level = 1;
507 
508  /* The fast version will always work for physical unicast */
509  WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
510 }
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
Definition: lapic.c:1208
#define APIC_DEST_NOSHORT
Definition: lapic.h:16

◆ kvm_xen_schedop_poll()

static bool kvm_xen_schedop_poll ( struct kvm_vcpu *  vcpu,
bool  longmode,
u64  param,
u64 *  r 
)
static

Definition at line 1261 of file xen.c.

1263 {
1264  struct sched_poll sched_poll;
1265  evtchn_port_t port, *ports;
1266  struct x86_exception e;
1267  int i;
1268 
1269  if (!lapic_in_kernel(vcpu) ||
1270  !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
1271  return false;
1272 
1273  if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
1274  struct compat_sched_poll sp32;
1275 
1276  /* Sanity check that the compat struct definition is correct */
1277  BUILD_BUG_ON(sizeof(sp32) != 16);
1278 
1279  if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
1280  *r = -EFAULT;
1281  return true;
1282  }
1283 
1284  /*
1285  * This is a 32-bit pointer to an array of evtchn_port_t which
1286  * are uint32_t, so once it's converted no further compat
1287  * handling is needed.
1288  */
1289  sched_poll.ports = (void *)(unsigned long)(sp32.ports);
1290  sched_poll.nr_ports = sp32.nr_ports;
1291  sched_poll.timeout = sp32.timeout;
1292  } else {
1293  if (kvm_read_guest_virt(vcpu, param, &sched_poll,
1294  sizeof(sched_poll), &e)) {
1295  *r = -EFAULT;
1296  return true;
1297  }
1298  }
1299 
1300  if (unlikely(sched_poll.nr_ports > 1)) {
1301  /* Xen (unofficially) limits number of pollers to 128 */
1302  if (sched_poll.nr_ports > 128) {
1303  *r = -EINVAL;
1304  return true;
1305  }
1306 
1307  ports = kmalloc_array(sched_poll.nr_ports,
1308  sizeof(*ports), GFP_KERNEL);
1309  if (!ports) {
1310  *r = -ENOMEM;
1311  return true;
1312  }
1313  } else
1314  ports = &port;
1315 
1316  if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
1317  sched_poll.nr_ports * sizeof(*ports), &e)) {
1318  *r = -EFAULT;
1319  return true;
1320  }
1321 
1322  for (i = 0; i < sched_poll.nr_ports; i++) {
1323  if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
1324  *r = -EINVAL;
1325  goto out;
1326  }
1327  }
1328 
1329  if (sched_poll.nr_ports == 1)
1330  vcpu->arch.xen.poll_evtchn = port;
1331  else
1332  vcpu->arch.xen.poll_evtchn = -1;
1333 
1334  set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
1335 
1336  if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
1337  vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
1338 
1339  if (sched_poll.timeout)
1340  mod_timer(&vcpu->arch.xen.poll_timer,
1341  jiffies + nsecs_to_jiffies(sched_poll.timeout));
1342 
1343  kvm_vcpu_halt(vcpu);
1344 
1345  if (sched_poll.timeout)
1346  del_timer(&vcpu->arch.xen.poll_timer);
1347 
1348  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
1349  }
1350 
1351  vcpu->arch.xen.poll_evtchn = 0;
1352  *r = 0;
1353 out:
1354  /* Really, this is only needed in case of timeout */
1355  clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
1356 
1357  if (unlikely(sched_poll.nr_ports > 1))
1358  kfree(ports);
1359  return true;
1360 }
void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
Definition: kvm_main.c:3842
static bool lapic_in_kernel(struct kvm_vcpu *vcpu)
Definition: lapic.h:186
uint32_t ports
Definition: xen.h:237
static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports, evtchn_port_t *ports)
Definition: xen.c:1223
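
For reference, the guest argument that kvm_read_guest_virt() copies in above is Xen's struct sched_poll; a sketch of the 64-bit layout (see xen/interface/sched.h):

    struct sched_poll {
            evtchn_port_t *ports;       /* guest pointer to an array of ports */
            unsigned int   nr_ports;    /* at most 128 are accepted above */
            uint64_t       timeout;     /* absolute timeout; 0 arms no timer above */
    };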

◆ kvm_xen_set_evtchn()

static int kvm_xen_set_evtchn ( struct kvm_xen_evtchn *  xe,
struct kvm *  kvm 
)
static

Definition at line 1713 of file xen.c.

1714 {
1715  bool mm_borrowed = false;
1716  int rc;
1717 
1718  rc = kvm_xen_set_evtchn_fast(xe, kvm);
1719  if (rc != -EWOULDBLOCK)
1720  return rc;
1721 
1722  if (current->mm != kvm->mm) {
1723  /*
1724  * If not on a thread which already belongs to this KVM,
1725  * we'd better be in the irqfd workqueue.
1726  */
1727  if (WARN_ON_ONCE(current->mm))
1728  return -EINVAL;
1729 
1730  kthread_use_mm(kvm->mm);
1731  mm_borrowed = true;
1732  }
1733 
1734  mutex_lock(&kvm->arch.xen.xen_lock);
1735 
1736  /*
1737  * It is theoretically possible for the page to be unmapped
1738  * and the MMU notifier to invalidate the shared_info before
1739  * we even get to use it. In that case, this looks like an
1740  * infinite loop. It was tempting to do it via the userspace
1741  * HVA instead... but that just *hides* the fact that it's
1742  * an infinite loop, because if a fault occurs and it waits
1743  * for the page to come back, it can *still* immediately
1744  * fault and have to wait again, repeatedly.
1745  *
1746  * Conversely, the page could also have been reinstated by
1747  * another thread before we even obtain the mutex above, so
1748  * check again *first* before remapping it.
1749  */
1750  do {
1751  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1752  int idx;
1753 
1754  rc = kvm_xen_set_evtchn_fast(xe, kvm);
1755  if (rc != -EWOULDBLOCK)
1756  break;
1757 
1758  idx = srcu_read_lock(&kvm->srcu);
1759  rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
1760  srcu_read_unlock(&kvm->srcu, idx);
1761  } while(!rc);
1762 
1763  mutex_unlock(&kvm->arch.xen.xen_lock);
1764 
1765  if (mm_borrowed)
1766  kthread_unuse_mm(kvm->mm);
1767 
1768  return rc;
1769 }
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
Definition: xen.c:1604

◆ kvm_xen_set_evtchn_fast()

int kvm_xen_set_evtchn_fast ( struct kvm_xen_evtchn *  xe,
struct kvm *  kvm 
)

Definition at line 1604 of file xen.c.

1605 {
1606  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1607  struct kvm_vcpu *vcpu;
1608  unsigned long *pending_bits, *mask_bits;
1609  unsigned long flags;
1610  int port_word_bit;
1611  bool kick_vcpu = false;
1612  int vcpu_idx, idx, rc;
1613 
1614  vcpu_idx = READ_ONCE(xe->vcpu_idx);
1615  if (vcpu_idx >= 0)
1616  vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1617  else {
1618  vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
1619  if (!vcpu)
1620  return -EINVAL;
1621  WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
1622  }
1623 
1624  if (!vcpu->arch.xen.vcpu_info_cache.active)
1625  return -EINVAL;
1626 
1627  if (xe->port >= max_evtchn_port(kvm))
1628  return -EINVAL;
1629 
1630  rc = -EWOULDBLOCK;
1631 
1632  idx = srcu_read_lock(&kvm->srcu);
1633 
1634  read_lock_irqsave(&gpc->lock, flags);
1635  if (!kvm_gpc_check(gpc, PAGE_SIZE))
1636  goto out_rcu;
1637 
1638  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1639  struct shared_info *shinfo = gpc->khva;
1640  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
1641  mask_bits = (unsigned long *)&shinfo->evtchn_mask;
1642  port_word_bit = xe->port / 64;
1643  } else {
1644  struct compat_shared_info *shinfo = gpc->khva;
1645  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
1646  mask_bits = (unsigned long *)&shinfo->evtchn_mask;
1647  port_word_bit = xe->port / 32;
1648  }
1649 
1650  /*
1651  * If this port wasn't already set, and if it isn't masked, then
1652  * we try to set the corresponding bit in the in-kernel shadow of
1653  * evtchn_pending_sel for the target vCPU. And if *that* wasn't
1654  * already set, then we kick the vCPU in question to write to the
1655  * *real* evtchn_pending_sel in its own guest vcpu_info struct.
1656  */
1657  if (test_and_set_bit(xe->port, pending_bits)) {
1658  rc = 0; /* It was already raised */
1659  } else if (test_bit(xe->port, mask_bits)) {
1660  rc = -ENOTCONN; /* Masked */
1661  kvm_xen_check_poller(vcpu, xe->port);
1662  } else {
1663  rc = 1; /* Delivered to the bitmap in shared_info. */
1664  /* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
1665  read_unlock_irqrestore(&gpc->lock, flags);
1666  gpc = &vcpu->arch.xen.vcpu_info_cache;
1667 
1668  read_lock_irqsave(&gpc->lock, flags);
1669  if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
1670  /*
1671  * Could not access the vcpu_info. Set the bit in-kernel
1672  * and prod the vCPU to deliver it for itself.
1673  */
1674  if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
1675  kick_vcpu = true;
1676  goto out_rcu;
1677  }
1678 
1679  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1680  struct vcpu_info *vcpu_info = gpc->khva;
1681  if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
1682  WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
1683  kick_vcpu = true;
1684  }
1685  } else {
1686  struct compat_vcpu_info *vcpu_info = gpc->khva;
1687  if (!test_and_set_bit(port_word_bit,
1688  (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
1689  WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
1690  kick_vcpu = true;
1691  }
1692  }
1693 
1694  /* For the per-vCPU lapic vector, deliver it as MSI. */
1695  if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
1696  kvm_xen_inject_vcpu_vector(vcpu);
1697  kick_vcpu = false;
1698  }
1699  }
1700 
1701  out_rcu:
1702  read_unlock_irqrestore(&gpc->lock, flags);
1703  srcu_read_unlock(&kvm->srcu, idx);
1704 
1705  if (kick_vcpu) {
1706  kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1707  kvm_vcpu_kick(vcpu);
1708  }
1709 
1710  return rc;
1711 }
uint32_t evtchn_pending[32]
Definition: xen.h:220
struct compat_vcpu_info vcpu_info[MAX_VIRT_CPUS]
Definition: xen.h:219
uint32_t evtchn_mask[32]
Definition: xen.h:221
static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
Definition: xen.c:1583
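
The word/bit arithmetic above follows the Xen 2-level event channel ABI: the port indexes a bit in shared_info->evtchn_pending, and one "selector" bit per word of that array lives in vcpu_info->evtchn_pending_sel. A condensed illustration of the mapping used for port_word_bit:

    /* Word size is 64 bits for long-mode guests, 32 bits for compat guests. */
    unsigned int word_bits   = long_mode ? 64 : 32;
    unsigned int pending_bit = port;              /* bit index into evtchn_pending[] */
    unsigned int sel_bit     = port / word_bits;  /* bit index into evtchn_pending_sel */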

◆ kvm_xen_setattr_evtchn()

static int kvm_xen_setattr_evtchn ( struct kvm *  kvm,
struct kvm_xen_hvm_attr *  data 
)
static

Definition at line 2050 of file xen.c.

2051 {
2052  u32 port = data->u.evtchn.send_port;
2053 
2054  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
2055  return kvm_xen_eventfd_reset(kvm);
2056 
2057  if (!port || port >= max_evtchn_port(kvm))
2058  return -EINVAL;
2059 
2060  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
2061  return kvm_xen_eventfd_deassign(kvm, port);
2062  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
2063  return kvm_xen_eventfd_update(kvm, data);
2064  if (data->u.evtchn.flags)
2065  return -EINVAL;
2066 
2067  return kvm_xen_eventfd_assign(kvm, data);
2068 }
static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
Definition: xen.c:1990
static int kvm_xen_eventfd_update(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
Definition: xen.c:1871
static int kvm_xen_eventfd_reset(struct kvm *kvm)
Definition: xen.c:2008
static int kvm_xen_eventfd_assign(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
Definition: xen.c:1918

◆ kvm_xen_setup_evtchn()

int kvm_xen_setup_evtchn ( struct kvm *  kvm,
struct kvm_kernel_irq_routing_entry *  e,
const struct kvm_irq_routing_entry *  ue 
)

Definition at line 1785 of file xen.c.

1789 {
1790  struct kvm_vcpu *vcpu;
1791 
1792  if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
1793  return -EINVAL;
1794 
1795  /* We only support 2 level event channels for now */
1796  if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
1797  return -EINVAL;
1798 
1799  /*
1800  * Xen gives us interesting mappings from vCPU index to APIC ID,
1801  * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
1802  * to find it. Do that once at setup time, instead of every time.
1803  * But beware that on live update / live migration, the routing
1804  * table might be reinstated before the vCPU threads have finished
1805  * recreating their vCPUs.
1806  */
1807  vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
1808  if (vcpu)
1809  e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
1810  else
1811  e->xen_evtchn.vcpu_idx = -1;
1812 
1813  e->xen_evtchn.port = ue->u.xen_evtchn.port;
1814  e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
1815  e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
1816  e->set = evtchn_set_fn;
1817 
1818  return 0;
1819 }
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status)
Definition: xen.c:1772
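
This hook runs when userspace installs a KVM_SET_GSI_ROUTING table containing an entry of type KVM_IRQ_ROUTING_XEN_EVTCHN. A hedged sketch of such an entry (GSI and port numbers are placeholders):

    struct kvm_irq_routing_entry entry = {
            .gsi  = 10,                          /* placeholder GSI */
            .type = KVM_IRQ_ROUTING_XEN_EVTCHN,
            .u.xen_evtchn = {
                    .port     = 3,               /* placeholder event channel */
                    .vcpu     = 0,               /* Xen vCPU id, resolved to vcpu_idx here */
                    .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
            },
    };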

◆ kvm_xen_shared_info_init()

static int kvm_xen_shared_info_init ( struct kvm *  kvm,
gfn_t  gfn 
)
static

Definition at line 37 of file xen.c.

38 {
39  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
40  struct pvclock_wall_clock *wc;
41  gpa_t gpa = gfn_to_gpa(gfn);
42  u32 *wc_sec_hi;
43  u32 wc_version;
44  u64 wall_nsec;
45  int ret = 0;
46  int idx = srcu_read_lock(&kvm->srcu);
47 
48  if (gfn == KVM_XEN_INVALID_GFN) {
49  kvm_gpc_deactivate(gpc);
50  goto out;
51  }
52 
53  do {
54  ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
55  if (ret)
56  goto out;
57 
58  /*
59  * This code mirrors kvm_write_wall_clock() except that it writes
60  * directly through the pfn cache and doesn't mark the page dirty.
61  */
62  wall_nsec = kvm_get_wall_clock_epoch(kvm);
63 
64  /* It could be invalid again already, so we need to check */
65  read_lock_irq(&gpc->lock);
66 
67  if (gpc->valid)
68  break;
69 
70  read_unlock_irq(&gpc->lock);
71  } while (1);
72 
73  /* Paranoia checks on the 32-bit struct layout */
74  BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
75  BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
76  BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
77 
78 #ifdef CONFIG_X86_64
79  /* Paranoia checks on the 64-bit struct layout */
80  BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
81  BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
82 
83  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
84  struct shared_info *shinfo = gpc->khva;
85 
86  wc_sec_hi = &shinfo->wc_sec_hi;
87  wc = &shinfo->wc;
88  } else
89 #endif
90  {
91  struct compat_shared_info *shinfo = gpc->khva;
92 
93  wc_sec_hi = &shinfo->arch.wc_sec_hi;
94  wc = &shinfo->wc;
95  }
96 
97  /* Increment and ensure an odd value */
98  wc_version = wc->version = (wc->version + 1) | 1;
99  smp_wmb();
100 
101  wc->nsec = do_div(wall_nsec, NSEC_PER_SEC);
102  wc->sec = (u32)wall_nsec;
103  *wc_sec_hi = wall_nsec >> 32;
104  smp_wmb();
105 
106  wc->version = wc_version + 1;
107  read_unlock_irq(&gpc->lock);
108 
109  kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
110 
111 out:
112  srcu_read_unlock(&kvm->srcu, idx);
113  return ret;
114 }
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
Definition: pfncache.c:357
uint32_t wc_sec_hi
Definition: xen.h:215
struct compat_arch_shared_info arch
Definition: xen.h:223
struct pvclock_wall_clock wc
Definition: xen.h:222
uint64_t kvm_get_wall_clock_epoch(struct kvm *kvm)
Definition: x86.c:3298
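
The wc->version updates above follow the usual pvclock seqcount protocol: make the version odd, write the payload, then bump it back to even. A sketch of the matching guest-side read loop (not from this file, just the expected consumer pattern):

    uint32_t ver;
    uint64_t sec;
    uint32_t nsec;
    do {
            ver  = READ_ONCE(wc->version);
            smp_rmb();
            sec  = wc->sec | ((uint64_t)*wc_sec_hi << 32);
            nsec = wc->nsec;
            smp_rmb();
    } while ((ver & 1) || READ_ONCE(wc->version) != ver);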

◆ kvm_xen_start_timer()

static void kvm_xen_start_timer ( struct kvm_vcpu *  vcpu,
u64  guest_abs,
s64  delta_ns 
)
static

Definition at line 161 of file xen.c.

162 {
163  /*
164  * Avoid races with the old timer firing. Checking timer_expires
165  * to avoid calling hrtimer_cancel() will only have false positives
166  * so is fine.
167  */
168  if (vcpu->arch.xen.timer_expires)
169  hrtimer_cancel(&vcpu->arch.xen.timer);
170 
171  atomic_set(&vcpu->arch.xen.timer_pending, 0);
172  vcpu->arch.xen.timer_expires = guest_abs;
173 
174  if (delta_ns <= 0) {
175  xen_timer_callback(&vcpu->arch.xen.timer);
176  } else {
177  ktime_t ktime_now = ktime_get();
178  hrtimer_start(&vcpu->arch.xen.timer,
179  ktime_add_ns(ktime_now, delta_ns),
180  HRTIMER_MODE_ABS_HARD);
181  }
182 }

◆ kvm_xen_stop_timer()

static void kvm_xen_stop_timer ( struct kvm_vcpu *  vcpu)
static

Definition at line 184 of file xen.c.

185 {
186  hrtimer_cancel(&vcpu->arch.xen.timer);
187  vcpu->arch.xen.timer_expires = 0;
188  atomic_set(&vcpu->arch.xen.timer_pending, 0);
189 }

◆ kvm_xen_update_runstate()

void kvm_xen_update_runstate ( struct kvm_vcpu *  v,
int  state 
)

Definition at line 465 of file xen.c.

466 {
467  struct kvm_vcpu_xen *vx = &v->arch.xen;
468  u64 now = get_kvmclock_ns(v->kvm);
469  u64 delta_ns = now - vx->runstate_entry_time;
470  u64 run_delay = current->sched_info.run_delay;
471 
472  if (unlikely(!vx->runstate_entry_time))
473  vx->current_runstate = RUNSTATE_offline;
474 
475  /*
476  * Time waiting for the scheduler isn't "stolen" if the
477  * vCPU wasn't running anyway.
478  */
479  if (vx->current_runstate == RUNSTATE_running) {
480  u64 steal_ns = run_delay - vx->last_steal;
481 
482  delta_ns -= steal_ns;
483 
484  vx->runstate_times[RUNSTATE_runnable] += steal_ns;
485  }
486  vx->last_steal = run_delay;
487 
488  vx->runstate_times[vx->current_runstate] += delta_ns;
489  vx->current_runstate = state;
490  vx->runstate_entry_time = now;
491 
492  if (vx->runstate_cache.active)
493  kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
494 }
static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
Definition: xen.c:198
int state
Definition: xen.h:0

◆ kvm_xen_update_runstate_guest()

static void kvm_xen_update_runstate_guest ( struct kvm_vcpu *  v,
bool  atomic 
)
static

Definition at line 198 of file xen.c.

199 {
200  struct kvm_vcpu_xen *vx = &v->arch.xen;
201  struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache;
202  struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache;
203  size_t user_len, user_len1, user_len2;
204  struct vcpu_runstate_info rs;
205  unsigned long flags;
206  size_t times_ofs;
207  uint8_t *update_bit = NULL;
208  uint64_t entry_time;
209  uint64_t *rs_times;
210  int *rs_state;
211 
212  /*
213  * The only difference between 32-bit and 64-bit versions of the
214  * runstate struct is the alignment of uint64_t in 32-bit, which
215  * means that the 64-bit version has an additional 4 bytes of
216  * padding after the first field 'state'. Let's be really really
217  * paranoid about that, and matching it with our internal data
218  * structures that we memcpy into it...
219  */
220  BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
221  BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
222  BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
223 #ifdef CONFIG_X86_64
224  /*
225  * The 64-bit structure has 4 bytes of padding before 'state_entry_time'
226  * so each subsequent field is shifted by 4, and it's 4 bytes longer.
227  */
228  BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
229  offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
230  BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
231  offsetof(struct compat_vcpu_runstate_info, time) + 4);
232  BUILD_BUG_ON(sizeof(struct vcpu_runstate_info) != 0x2c + 4);
233 #endif
234  /*
235  * The state field is in the same place at the start of both structs,
236  * and is the same size (int) as vx->current_runstate.
237  */
238  BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
239  offsetof(struct compat_vcpu_runstate_info, state));
240  BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
241  sizeof(vx->current_runstate));
242  BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
243  sizeof(vx->current_runstate));
244 
245  /*
246  * The state_entry_time field is 64 bits in both versions, and the
247  * XEN_RUNSTATE_UPDATE flag is in the top bit, which given that x86
248  * is little-endian means that it's in the last *byte* of the word.
249  * That detail is important later.
250  */
251  BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
252  sizeof(uint64_t));
253  BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
254  sizeof(uint64_t));
255  BUILD_BUG_ON((XEN_RUNSTATE_UPDATE >> 56) != 0x80);
256 
257  /*
258  * The time array is four 64-bit quantities in both versions, matching
259  * the vx->runstate_times and immediately following state_entry_time.
260  */
261  BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
262  offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t));
263  BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
264  offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t));
265  BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
266  sizeof_field(struct compat_vcpu_runstate_info, time));
267  BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
268  sizeof(vx->runstate_times));
269 
270  if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
271  user_len = sizeof(struct vcpu_runstate_info);
272  times_ofs = offsetof(struct vcpu_runstate_info,
273  state_entry_time);
274  } else {
275  user_len = sizeof(struct compat_vcpu_runstate_info);
276  times_ofs = offsetof(struct compat_vcpu_runstate_info,
277  state_entry_time);
278  }
279 
280  /*
281  * There are basically no alignment constraints. The guest can set it
282  * up so it crosses from one page to the next, and at arbitrary byte
283  * alignment (and the 32-bit ABI doesn't align the 64-bit integers
284  * anyway, even if the overall struct had been 64-bit aligned).
285  */
286  if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
287  user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK);
288  user_len2 = user_len - user_len1;
289  } else {
290  user_len1 = user_len;
291  user_len2 = 0;
292  }
293  BUG_ON(user_len1 + user_len2 != user_len);
294 
295  retry:
296  /*
297  * Attempt to obtain the GPC lock on *both* (if there are two)
298  * gfn_to_pfn caches that cover the region.
299  */
300  if (atomic) {
301  local_irq_save(flags);
302  if (!read_trylock(&gpc1->lock)) {
303  local_irq_restore(flags);
304  return;
305  }
306  } else {
307  read_lock_irqsave(&gpc1->lock, flags);
308  }
309  while (!kvm_gpc_check(gpc1, user_len1)) {
310  read_unlock_irqrestore(&gpc1->lock, flags);
311 
312  /* When invoked from kvm_sched_out() we cannot sleep */
313  if (atomic)
314  return;
315 
316  if (kvm_gpc_refresh(gpc1, user_len1))
317  return;
318 
319  read_lock_irqsave(&gpc1->lock, flags);
320  }
321 
322  if (likely(!user_len2)) {
323  /*
324  * Set up three pointers directly to the runstate_info
325  * struct in the guest (via the GPC).
326  *
327  * • @rs_state → state field
328  * • @rs_times → state_entry_time field.
329  * • @update_bit → last byte of state_entry_time, which
330  * contains the XEN_RUNSTATE_UPDATE bit.
331  */
332  rs_state = gpc1->khva;
333  rs_times = gpc1->khva + times_ofs;
334  if (v->kvm->arch.xen.runstate_update_flag)
335  update_bit = ((void *)(&rs_times[1])) - 1;
336  } else {
337  /*
338  * The guest's runstate_info is split across two pages and we
339  * need to hold and validate both GPCs simultaneously. We can
340  * declare a lock ordering GPC1 > GPC2 because nothing else
341  * takes them more than one at a time. Set a subclass on the
342  * gpc1 lock to make lockdep shut up about it.
343  */
344  lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
345  if (atomic) {
346  if (!read_trylock(&gpc2->lock)) {
347  read_unlock_irqrestore(&gpc1->lock, flags);
348  return;
349  }
350  } else {
351  read_lock(&gpc2->lock);
352  }
353 
354  if (!kvm_gpc_check(gpc2, user_len2)) {
355  read_unlock(&gpc2->lock);
356  read_unlock_irqrestore(&gpc1->lock, flags);
357 
358  /* When invoked from kvm_sched_out() we cannot sleep */
359  if (atomic)
360  return;
361 
362  /*
363  * Use kvm_gpc_activate() here because if the runstate
364  * area was configured in 32-bit mode and only extends
365  * to the second page now because the guest changed to
366  * 64-bit mode, the second GPC won't have been set up.
367  */
368  if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
369  user_len2))
370  return;
371 
372  /*
373  * We dropped the lock on GPC1 so we have to go all the
374  * way back and revalidate that too.
375  */
376  goto retry;
377  }
378 
379  /*
380  * In this case, the runstate_info struct will be assembled on
381  * the kernel stack (compat or not as appropriate) and will
382  * be copied to GPC1/GPC2 with a dual memcpy. Set up the three
383  * rs pointers accordingly.
384  */
385  rs_times = &rs.state_entry_time;
386 
387  /*
388  * The rs_state pointer points to the start of what we'll
389  * copy to the guest, which in the case of a compat guest
390  * is the 32-bit field that the compiler thinks is padding.
391  */
392  rs_state = ((void *)rs_times) - times_ofs;
393 
394  /*
395  * The update_bit is still directly in the guest memory,
396  * via one GPC or the other.
397  */
398  if (v->kvm->arch.xen.runstate_update_flag) {
399  if (user_len1 >= times_ofs + sizeof(uint64_t))
400  update_bit = gpc1->khva + times_ofs +
401  sizeof(uint64_t) - 1;
402  else
403  update_bit = gpc2->khva + times_ofs +
404  sizeof(uint64_t) - 1 - user_len1;
405  }
406 
407 #ifdef CONFIG_X86_64
408  /*
409  * Don't leak kernel memory through the padding in the 64-bit
410  * version of the struct.
411  */
412  memset(&rs, 0, offsetof(struct vcpu_runstate_info, state_entry_time));
413 #endif
414  }
415 
416  /*
417  * First, set the XEN_RUNSTATE_UPDATE bit in the top bit of the
418  * state_entry_time field, directly in the guest. We need to set
419  * that (and write-barrier) before writing to the rest of the
420  * structure, and clear it last. Just as Xen does, we address the
421  * single *byte* in which it resides because it might be in a
422  * different cache line to the rest of the 64-bit word, due to
423  * the (lack of) alignment constraints.
424  */
425  entry_time = vx->runstate_entry_time;
426  if (update_bit) {
427  entry_time |= XEN_RUNSTATE_UPDATE;
428  *update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56;
429  smp_wmb();
430  }
431 
432  /*
433  * Now assemble the actual structure, either on our kernel stack
434  * or directly in the guest according to how the rs_state and
435  * rs_times pointers were set up above.
436  */
437  *rs_state = vx->current_runstate;
438  rs_times[0] = entry_time;
439  memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
440 
441  /* For the split case, we have to then copy it to the guest. */
442  if (user_len2) {
443  memcpy(gpc1->khva, rs_state, user_len1);
444  memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2);
445  }
446  smp_wmb();
447 
448  /* Finally, clear the XEN_RUNSTATE_UPDATE bit. */
449  if (update_bit) {
450  entry_time &= ~XEN_RUNSTATE_UPDATE;
451  *update_bit = entry_time >> 56;
452  smp_wmb();
453  }
454 
455  if (user_len2)
456  read_unlock(&gpc2->lock);
457 
458  read_unlock_irqrestore(&gpc1->lock, flags);
459 
460  mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
461  if (user_len2)
462  mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
463 }
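Because the guest may place the runstate area at any byte alignment, it can straddle a page boundary. A standalone sketch (userspace; split_runstate is a hypothetical helper) reproducing the user_len1/user_len2 split computed above for a couple of example addresses:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static void split_runstate(uint64_t gpa, size_t user_len)
{
	size_t user_len1, user_len2;

	if ((gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpa & ~PAGE_MASK);
		user_len2 = user_len - user_len1;
	} else {
		user_len1 = user_len;
		user_len2 = 0;
	}
	printf("gpa 0x%llx: %zu bytes in first page, %zu in second\n",
	       (unsigned long long)gpa, user_len1, user_len2);
}

int main(void)
{
	split_runstate(0x11fe0, 0x2c + 4);	/* 64-bit struct, crosses a page: 32 + 16 */
	split_runstate(0x10100, 0x2c);		/* compat struct, fits in one page */
	return 0;
}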

◆ kvm_xen_update_tsc_info()

void kvm_xen_update_tsc_info ( struct kvm_vcpu *  vcpu)

Definition at line 2135 of file xen.c.

2136 {
2137  struct kvm_cpuid_entry2 *entry;
2138  u32 function;
2139 
2140  if (!vcpu->arch.xen.cpuid.base)
2141  return;
2142 
2143  function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3);
2144  if (function > vcpu->arch.xen.cpuid.limit)
2145  return;
2146 
2147  entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
2148  if (entry) {
2149  entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul;
2150  entry->edx = vcpu->arch.hv_clock.tsc_shift;
2151  }
2152 
2153  entry = kvm_find_cpuid_entry_index(vcpu, function, 2);
2154  if (entry)
2155  entry->eax = vcpu->arch.hw_tsc_khz;
2156 }
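The leaf refreshed here is the Xen time leaf (base + 3): subleaf 1 carries the pvclock scaling parameters tsc_to_system_mul (ECX) and tsc_shift (EDX), and subleaf 2 reports the vCPU TSC frequency in kHz (EAX). A guest-side sketch of reading them back (assumes GCC's <cpuid.h>; the 0x40000000 base is only an example value, a real guest discovers the Xen leaf base first):

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

int main(void)
{
	uint32_t base = 0x40000000;	/* hypothetical Xen CPUID base */
	uint32_t eax, ebx, ecx, edx;

	__cpuid_count(base + 3, 1, eax, ebx, ecx, edx);
	printf("tsc_to_system_mul=%u tsc_shift=%d\n", ecx, (int32_t)edx);

	__cpuid_count(base + 3, 2, eax, ebx, ecx, edx);
	printf("tsc frequency: %u kHz\n", eax);
	return 0;
}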

◆ kvm_xen_vcpu_get_attr()

int kvm_xen_vcpu_get_attr ( struct kvm_vcpu *  vcpu,
struct kvm_xen_vcpu_attr *  data 
)

Definition at line 972 of file xen.c.

973 {
974  int r = -ENOENT;
975 
976  mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
977 
978  switch (data->type) {
979  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
980  if (vcpu->arch.xen.vcpu_info_cache.active)
981  data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
982  else
983  data->u.gpa = KVM_XEN_INVALID_GPA;
984  r = 0;
985  break;
986 
987  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
988  if (vcpu->arch.xen.vcpu_time_info_cache.active)
989  data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
990  else
991  data->u.gpa = KVM_XEN_INVALID_GPA;
992  r = 0;
993  break;
994 
995  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
996  if (!sched_info_on()) {
997  r = -EOPNOTSUPP;
998  break;
999  }
1000  if (vcpu->arch.xen.runstate_cache.active) {
1001  data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
1002  r = 0;
1003  }
1004  break;
1005 
1006  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
1007  if (!sched_info_on()) {
1008  r = -EOPNOTSUPP;
1009  break;
1010  }
1011  data->u.runstate.state = vcpu->arch.xen.current_runstate;
1012  r = 0;
1013  break;
1014 
1015  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
1016  if (!sched_info_on()) {
1017  r = -EOPNOTSUPP;
1018  break;
1019  }
1020  data->u.runstate.state = vcpu->arch.xen.current_runstate;
1021  data->u.runstate.state_entry_time =
1022  vcpu->arch.xen.runstate_entry_time;
1023  data->u.runstate.time_running =
1024  vcpu->arch.xen.runstate_times[RUNSTATE_running];
1025  data->u.runstate.time_runnable =
1026  vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
1027  data->u.runstate.time_blocked =
1028  vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
1029  data->u.runstate.time_offline =
1030  vcpu->arch.xen.runstate_times[RUNSTATE_offline];
1031  r = 0;
1032  break;
1033 
1034  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
1035  r = -EINVAL;
1036  break;
1037 
1038  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
1039  data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
1040  r = 0;
1041  break;
1042 
1043  case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
1044  /*
1045  * Ensure a consistent snapshot of state is captured, with a
1046  * timer either being pending, or the event channel delivered
1047  * to the corresponding bit in the shared_info. Not still
1048  * lurking in the timer_pending flag for deferred delivery.
1049  * Purely as an optimisation, if the timer_expires field is
1050  * zero, that means the timer isn't active (or even in the
1051  * timer_pending flag) and there is no need to cancel it.
1052  */
1053  if (vcpu->arch.xen.timer_expires) {
1054  hrtimer_cancel(&vcpu->arch.xen.timer);
1055  kvm_xen_inject_timer_irqs(vcpu);
1056  }
1057 
1058  data->u.timer.port = vcpu->arch.xen.timer_virq;
1059  data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
1060  data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
1061 
1062  /*
1063  * The hrtimer may trigger and raise the IRQ immediately,
1064  * while the returned state causes it to be set up and
1065  * raised again on the destination system after migration.
1066  * That's fine, as the guest won't even have had a chance
1067  * to run and handle the interrupt. Asserting an already
1068  * pending event channel is idempotent.
1069  */
1070  if (vcpu->arch.xen.timer_expires)
1071  hrtimer_start_expires(&vcpu->arch.xen.timer,
1072  HRTIMER_MODE_ABS_HARD);
1073 
1074  r = 0;
1075  break;
1076 
1077  case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
1078  data->u.vector = vcpu->arch.xen.upcall_vector;
1079  r = 0;
1080  break;
1081 
1082  default:
1083  break;
1084  }
1085 
1086  mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
1087  return r;
1088 }
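A userspace sketch of driving this from a VMM: snapshot the per-vCPU timer with KVM_XEN_VCPU_GET_ATTR so it can be re-armed on the destination with KVM_XEN_VCPU_SET_ATTR after migration (vcpu_fd and the helper name are assumptions; error handling omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int save_xen_timer(int vcpu_fd, struct kvm_xen_vcpu_attr *out)
{
	memset(out, 0, sizeof(*out));
	out->type = KVM_XEN_VCPU_ATTR_TYPE_TIMER;

	/* On success, fills out->u.timer.port, .priority and .expires_ns */
	return ioctl(vcpu_fd, KVM_XEN_VCPU_GET_ATTR, out);
}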

◆ kvm_xen_vcpu_set_attr()

int kvm_xen_vcpu_set_attr ( struct kvm_vcpu *  vcpu,
struct kvm_xen_vcpu_attr *  data 
)

Definition at line 736 of file xen.c.

737 {
738  int idx, r = -ENOENT;
739 
740  mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
741  idx = srcu_read_lock(&vcpu->kvm->srcu);
742 
743  switch (data->type) {
744  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
745  /* No compat necessary here. */
746  BUILD_BUG_ON(sizeof(struct vcpu_info) !=
747  sizeof(struct compat_vcpu_info));
748  BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
749  offsetof(struct compat_vcpu_info, time));
750 
751  if (data->u.gpa == KVM_XEN_INVALID_GPA) {
752  kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
753  r = 0;
754  break;
755  }
756 
757  r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
758  data->u.gpa, sizeof(struct vcpu_info));
759  if (!r)
760  kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
761 
762  break;
763 
764  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
765  if (data->u.gpa == KVM_XEN_INVALID_GPA) {
766  kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
767  r = 0;
768  break;
769  }
770 
771  r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
772  data->u.gpa,
773  sizeof(struct pvclock_vcpu_time_info));
774  if (!r)
775  kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
776  break;
777 
778  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR: {
779  size_t sz, sz1, sz2;
780 
781  if (!sched_info_on()) {
782  r = -EOPNOTSUPP;
783  break;
784  }
785  if (data->u.gpa == KVM_XEN_INVALID_GPA) {
786  r = 0;
787  deactivate_out:
788  kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
789  kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
790  break;
791  }
792 
793  /*
794  * If the guest switches to 64-bit mode after setting the runstate
795  * address, that's actually OK. kvm_xen_update_runstate_guest()
796  * will cope.
797  */
798  if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
799  sz = sizeof(struct vcpu_runstate_info);
800  else
801  sz = sizeof(struct compat_vcpu_runstate_info);
802 
803  /* How much fits in the (first) page? */
804  sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
805  r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
806  data->u.gpa, sz1);
807  if (r)
808  goto deactivate_out;
809 
810  /* Either map the second page, or deactivate the second GPC */
811  if (sz1 >= sz) {
812  kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
813  } else {
814  sz2 = sz - sz1;
815  BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
816  r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
817  data->u.gpa + sz1, sz2);
818  if (r)
819  goto deactivate_out;
820  }
821 
822  kvm_xen_update_runstate_guest(vcpu, false);
823  break;
824  }
825  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
826  if (!sched_info_on()) {
827  r = -EOPNOTSUPP;
828  break;
829  }
830  if (data->u.runstate.state > RUNSTATE_offline) {
831  r = -EINVAL;
832  break;
833  }
834 
835  kvm_xen_update_runstate(vcpu, data->u.runstate.state);
836  r = 0;
837  break;
838 
839  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
840  if (!sched_info_on()) {
841  r = -EOPNOTSUPP;
842  break;
843  }
844  if (data->u.runstate.state > RUNSTATE_offline) {
845  r = -EINVAL;
846  break;
847  }
848  if (data->u.runstate.state_entry_time !=
849  (data->u.runstate.time_running +
850  data->u.runstate.time_runnable +
851  data->u.runstate.time_blocked +
852  data->u.runstate.time_offline)) {
853  r = -EINVAL;
854  break;
855  }
856  if (get_kvmclock_ns(vcpu->kvm) <
857  data->u.runstate.state_entry_time) {
858  r = -EINVAL;
859  break;
860  }
861 
862  vcpu->arch.xen.current_runstate = data->u.runstate.state;
863  vcpu->arch.xen.runstate_entry_time =
864  data->u.runstate.state_entry_time;
865  vcpu->arch.xen.runstate_times[RUNSTATE_running] =
866  data->u.runstate.time_running;
867  vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
868  data->u.runstate.time_runnable;
869  vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
870  data->u.runstate.time_blocked;
871  vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
872  data->u.runstate.time_offline;
873  vcpu->arch.xen.last_steal = current->sched_info.run_delay;
874  r = 0;
875  break;
876 
877  case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
878  if (!sched_info_on()) {
879  r = -EOPNOTSUPP;
880  break;
881  }
882  if (data->u.runstate.state > RUNSTATE_offline &&
883  data->u.runstate.state != (u64)-1) {
884  r = -EINVAL;
885  break;
886  }
887  /* The adjustment must add up */
888  if (data->u.runstate.state_entry_time !=
889  (data->u.runstate.time_running +
890  data->u.runstate.time_runnable +
891  data->u.runstate.time_blocked +
892  data->u.runstate.time_offline)) {
893  r = -EINVAL;
894  break;
895  }
896 
897  if (get_kvmclock_ns(vcpu->kvm) <
898  (vcpu->arch.xen.runstate_entry_time +
899  data->u.runstate.state_entry_time)) {
900  r = -EINVAL;
901  break;
902  }
903 
904  vcpu->arch.xen.runstate_entry_time +=
905  data->u.runstate.state_entry_time;
906  vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
907  data->u.runstate.time_running;
908  vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
909  data->u.runstate.time_runnable;
910  vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
911  data->u.runstate.time_blocked;
912  vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
913  data->u.runstate.time_offline;
914 
915  if (data->u.runstate.state <= RUNSTATE_offline)
916  kvm_xen_update_runstate(vcpu, data->u.runstate.state);
917  else if (vcpu->arch.xen.runstate_cache.active)
918  kvm_xen_update_runstate_guest(vcpu, false);
919  r = 0;
920  break;
921 
922  case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
923  if (data->u.vcpu_id >= KVM_MAX_VCPUS)
924  r = -EINVAL;
925  else {
926  vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
927  r = 0;
928  }
929  break;
930 
931  case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
932  if (data->u.timer.port &&
933  data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
934  r = -EINVAL;
935  break;
936  }
937 
938  if (!vcpu->arch.xen.timer.function)
939  kvm_xen_init_timer(vcpu);
940 
941  /* Stop the timer (if it's running) before changing the vector */
942  kvm_xen_stop_timer(vcpu);
943  vcpu->arch.xen.timer_virq = data->u.timer.port;
944 
945  /* Start the timer if the new value has a valid vector+expiry. */
946  if (data->u.timer.port && data->u.timer.expires_ns)
947  kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
948  data->u.timer.expires_ns -
949  get_kvmclock_ns(vcpu->kvm));
950 
951  r = 0;
952  break;
953 
954  case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
955  if (data->u.vector && data->u.vector < 0x10)
956  r = -EINVAL;
957  else {
958  vcpu->arch.xen.upcall_vector = data->u.vector;
959  r = 0;
960  }
961  break;
962 
963  default:
964  break;
965  }
966 
967  srcu_read_unlock(&vcpu->kvm->srcu, idx);
968  mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
969  return r;
970 }
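A matching userspace sketch for the set side: register the runstate area with KVM_XEN_VCPU_SET_ATTR (vcpu_fd, the helper name and the chosen GPA are assumptions; passing KVM_XEN_INVALID_GPA instead deactivates both runstate caches):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int set_xen_runstate_addr(int vcpu_fd, __u64 runstate_gpa)
{
	struct kvm_xen_vcpu_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR;
	attr.u.gpa = runstate_gpa;

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}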

◆ kvm_xen_write_hypercall_page()

int kvm_xen_write_hypercall_page ( struct kvm_vcpu *  vcpu,
u64  data 
)

Definition at line 1090 of file xen.c.

1091 {
1092  struct kvm *kvm = vcpu->kvm;
1093  u32 page_num = data & ~PAGE_MASK;
1094  u64 page_addr = data & PAGE_MASK;
1095  bool lm = is_long_mode(vcpu);
1096 
1097  /* Latch long_mode for shared_info pages etc. */
1098  vcpu->kvm->arch.xen.long_mode = lm;
1099 
1100  /*
1101  * If Xen hypercall intercept is enabled, fill the hypercall
1102  * page with VMCALL/VMMCALL instructions since that's what
1103  * we catch. Else the VMM has provided the hypercall pages
1104  * with instructions of its own choosing, so use those.
1105  */
1106  if (kvm_xen_hypercall_enabled(kvm)) {
1107  u8 instructions[32];
1108  int i;
1109 
1110  if (page_num)
1111  return 1;
1112 
1113  /* mov imm32, %eax */
1114  instructions[0] = 0xb8;
1115 
1116  /* vmcall / vmmcall */
1117  static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);
1118 
1119  /* ret */
1120  instructions[8] = 0xc3;
1121 
1122  /* int3 to pad */
1123  memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
1124 
1125  for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
1126  *(u32 *)&instructions[1] = i;
1127  if (kvm_vcpu_write_guest(vcpu,
1128  page_addr + (i * sizeof(instructions)),
1129  instructions, sizeof(instructions)))
1130  return 1;
1131  }
1132  } else {
1133  /*
1134  * Note, truncation is a non-issue as 'lm' is guaranteed to be
1135  * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
1136  */
1137  hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
1138  : kvm->arch.xen_hvm_config.blob_addr_32;
1139  u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1140  : kvm->arch.xen_hvm_config.blob_size_32;
1141  u8 *page;
1142  int ret;
1143 
1144  if (page_num >= blob_size)
1145  return 1;
1146 
1147  blob_addr += page_num * PAGE_SIZE;
1148 
1149  page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
1150  if (IS_ERR(page))
1151  return PTR_ERR(page);
1152 
1153  ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
1154  kfree(page);
1155  if (ret)
1156  return 1;
1157  }
1158  return 0;
1159 }
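Each hypercall therefore occupies a 32-byte slot: a 5-byte mov $nr,%eax, the patched VMCALL/VMMCALL at offset 5, a ret at offset 8 and int3 padding, and a guest reaches hypercall nr by calling hypercall_page + nr * 32. A standalone sketch that assembles one slot the same way (assumes the Intel VMCALL encoding 0f 01 c1; on AMD the kernel patches in VMMCALL instead):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void build_slot(uint8_t slot[32], uint32_t nr)
{
	slot[0] = 0xb8;					/* mov $nr, %eax */
	memcpy(&slot[1], &nr, sizeof(nr));		/* imm32, little-endian */
	slot[5] = 0x0f; slot[6] = 0x01; slot[7] = 0xc1;	/* vmcall */
	slot[8] = 0xc3;					/* ret */
	memset(&slot[9], 0xcc, 32 - 9);			/* int3 padding */
}

int main(void)
{
	uint8_t slot[32];
	int i;

	build_slot(slot, 2);	/* stub for hypercall number 2 */
	for (i = 0; i < 9; i++)
		printf("%02x ", slot[i]);
	printf("\n");
	return 0;
}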

◆ max_evtchn_port()

static int max_evtchn_port ( struct kvm *  kvm)
inlinestatic

Definition at line 1215 of file xen.c.

1216 {
1217  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
1218  return EVTCHN_2L_NR_CHANNELS;
1219  else
1220  return COMPAT_EVTCHN_2L_NR_CHANNELS;
1221 }

◆ wait_pending_event()

static bool wait_pending_event ( struct kvm_vcpu *  vcpu,
int  nr_ports,
evtchn_port_t *  ports 
)
static

Definition at line 1223 of file xen.c.

1225 {
1226  struct kvm *kvm = vcpu->kvm;
1227  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1228  unsigned long *pending_bits;
1229  unsigned long flags;
1230  bool ret = true;
1231  int idx, i;
1232 
1233  idx = srcu_read_lock(&kvm->srcu);
1234  read_lock_irqsave(&gpc->lock, flags);
1235  if (!kvm_gpc_check(gpc, PAGE_SIZE))
1236  goto out_rcu;
1237 
1238  ret = false;
1239  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1240  struct shared_info *shinfo = gpc->khva;
1241  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
1242  } else {
1243  struct compat_shared_info *shinfo = gpc->khva;
1244  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
1245  }
1246 
1247  for (i = 0; i < nr_ports; i++) {
1248  if (test_bit(ports[i], pending_bits)) {
1249  ret = true;
1250  break;
1251  }
1252  }
1253 
1254  out_rcu:
1255  read_unlock_irqrestore(&gpc->lock, flags);
1256  srcu_read_unlock(&kvm->srcu, idx);
1257 
1258  return ret;
1259 }
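In the 2-level ABI a port is simply a bit index into shared_info->evtchn_pending: 64 * 64 = 4096 ports for a 64-bit guest and 32 * 32 = 1024 for a compat guest, matching max_evtchn_port() above. On little-endian x86 the byte and bit position of a given port are the same whether the array is built from 32-bit or 64-bit words, which is why the code can cast either layout to unsigned long * and use test_bit(). A standalone sketch of that mapping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-addressed view of evtchn_pending; equivalent to test_bit() on a
 * little-endian unsigned long bitmap. */
static bool port_pending(const uint8_t *evtchn_pending, unsigned int port)
{
	return evtchn_pending[port / 8] & (1u << (port % 8));
}

int main(void)
{
	uint8_t pending[4096 / 8] = { 0 };	/* 4096 ports for a 64-bit guest */

	pending[131 / 8] |= 1u << (131 % 8);	/* mark port 131 pending */
	printf("port 131 pending: %d\n", port_pending(pending, 131));
	printf("port 130 pending: %d\n", port_pending(pending, 130));
	return 0;
}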

◆ xen_timer_callback()

static enum hrtimer_restart xen_timer_callback ( struct hrtimer *  timer)
static

Definition at line 133 of file xen.c.

134 {
135  struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
136  arch.xen.timer);
137  struct kvm_xen_evtchn e;
138  int rc;
139 
140  if (atomic_read(&vcpu->arch.xen.timer_pending))
141  return HRTIMER_NORESTART;
142 
143  e.vcpu_id = vcpu->vcpu_id;
144  e.vcpu_idx = vcpu->vcpu_idx;
145  e.port = vcpu->arch.xen.timer_virq;
146  e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
147 
148  rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm);
149  if (rc != -EWOULDBLOCK) {
150  vcpu->arch.xen.timer_expires = 0;
151  return HRTIMER_NORESTART;
152  }
153 
154  atomic_inc(&vcpu->arch.xen.timer_pending);
155  kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
156  kvm_vcpu_kick(vcpu);
157 
158  return HRTIMER_NORESTART;
159 }

Variable Documentation

◆ __attribute__

struct evtchnfd __attribute__

◆ flags

uint32_t flags

Member of struct compat_vcpu_set_singleshot_timer (defined in xen.c).

◆ timeout_abs_ns

uint64_t timeout_abs_ns

Member of struct compat_vcpu_set_singleshot_timer (defined in xen.c).