KVM: mmio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

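/*
 * Store the least significant @len bytes of @data into @buf using the
 * host's memory representation, ready to be handed to an in-kernel
 * device or copied out to userspace via kvm_run.
 */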
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
        void *datap = NULL;
        union {
                u8 byte;
                u16 hword;
                u32 word;
                u64 dword;
        } tmp;

        switch (len) {
        case 1:
                tmp.byte = data;
                datap = &tmp.byte;
                break;
        case 2:
                tmp.hword = data;
                datap = &tmp.hword;
                break;
        case 4:
                tmp.word = data;
                datap = &tmp.word;
                break;
        case 8:
                tmp.dword = data;
                datap = &tmp.dword;
                break;
        }

        memcpy(buf, datap, len);
}

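/*
 * The converse of kvm_mmio_write_buf(): fetch up to @len bytes from
 * @buf and return them zero-extended in an unsigned long.
 */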
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
        unsigned long data = 0;
        union {
                u16 hword;
                u32 word;
                u64 dword;
        } tmp;

        switch (len) {
        case 1:
                data = *(u8 *)buf;
                break;
        case 2:
                memcpy(&tmp.hword, buf, len);
                data = tmp.hword;
                break;
        case 4:
                memcpy(&tmp.word, buf, len);
                data = tmp.word;
                break;
        case 8:
                memcpy(&tmp.dword, buf, len);
                data = tmp.dword;
                break;
        }

        return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *                           or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
        unsigned long data;
        unsigned int len;
        int mask;

        /* Detect an already handled MMIO return */
        if (unlikely(!vcpu->mmio_needed))
                return 0;

        vcpu->mmio_needed = 0;

        if (!kvm_vcpu_dabt_iswrite(vcpu)) {
                struct kvm_run *run = vcpu->run;

                len = kvm_vcpu_dabt_get_as(vcpu);
                data = kvm_mmio_read_buf(run->mmio.data, len);

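                /*
                 * Sign-extend a narrow load when the instruction asks
                 * for it: XORing with the sign-bit mask and then
                 * subtracting the mask turns, e.g., the byte 0x80 into
                 * 0xffffffffffffff80.
                 */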
                if (kvm_vcpu_dabt_issext(vcpu) &&
                    len < sizeof(unsigned long)) {
                        mask = 1U << ((len * 8) - 1);
                        data = (data ^ mask) - mask;
                }

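                /*
                 * SF clear means a 32-bit register transfer; keep only
                 * the low word.
                 */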
                if (!kvm_vcpu_dabt_issf(vcpu))
                        data = data & 0xffffffff;

                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                               &data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
        }

        /*
         * The MMIO instruction is emulated and should not be re-executed
         * in the guest.
         */
        kvm_incr_pc(vcpu);

        return 0;
}

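/*
 * Entry point for a data abort that stage-2 could not resolve as guest
 * memory: decode the access from the fault syndrome, try the in-kernel
 * KVM_MMIO_BUS devices first, and otherwise hand the access to
 * userspace as a KVM_EXIT_MMIO (or KVM_EXIT_ARM_NISV) exit.
 */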
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        struct kvm_run *run = vcpu->run;
        unsigned long data;
        unsigned long rt;
        int ret;
        bool is_write;
        int len;
        u8 data_buf[8];

        /*
         * No valid syndrome? Ask userspace for help if it has
         * volunteered to do so, and bail out otherwise.
         */
        if (!kvm_vcpu_dabt_isvalid(vcpu)) {
                trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
                                    kvm_vcpu_get_hfar(vcpu), fault_ipa);

                if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
                             &vcpu->kvm->arch.flags)) {
                        run->exit_reason = KVM_EXIT_ARM_NISV;
                        run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
                        run->arm_nisv.fault_ipa = fault_ipa;
                        return 0;
                }

                return -ENOSYS;
        }

        /*
         * Prepare MMIO operation. First decode the syndrome data we get
         * from the CPU. Then try if some in-kernel emulation feels
         * responsible, otherwise let user space do its magic.
         */
        is_write = kvm_vcpu_dabt_iswrite(vcpu);
        len = kvm_vcpu_dabt_get_as(vcpu);
        rt = kvm_vcpu_dabt_get_rd(vcpu);

        if (is_write) {
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);

                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);

                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                               fault_ipa, NULL);

                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
        }

        /* Now prepare kvm_run for the potential return to userland. */
        run->mmio.is_write = is_write;
        run->mmio.phys_addr = fault_ipa;
        run->mmio.len = len;
        vcpu->mmio_needed = 1;

        if (!ret) {
                /* We handled the access successfully in the kernel. */
                if (!is_write)
                        memcpy(run->mmio.data, data_buf, len);
                vcpu->stat.mmio_exit_kernel++;
                kvm_handle_mmio_return(vcpu);
                return 1;
        }

        if (is_write)
                memcpy(run->mmio.data, data_buf, len);
        vcpu->stat.mmio_exit_user++;
        run->exit_reason = KVM_EXIT_MMIO;
        return 0;
}
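For context, here is a minimal sketch of the userspace half of the
KVM_EXIT_MMIO contract that io_mem_abort() sets up. The kvm_run mmio
fields and the KVM_RUN ioctl are the real KVM userspace ABI;
device_mmio() is a hypothetical stand-in for a VMM's device model.

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical toy device model: reads return zero, writes are dropped. */
static void device_mmio(__u64 addr, __u8 *data, __u32 len, __u8 is_write)
{
        if (!is_write)
                memset(data, 0, len);
}

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
                if (run->exit_reason != KVM_EXIT_MMIO)
                        continue; /* other exit reasons elided */

                /*
                 * For a write, run->mmio.data already holds the bytes the
                 * guest stored. For a read, userspace fills run->mmio.data;
                 * on the next KVM_RUN, kvm_handle_mmio_return() loads the
                 * value into the guest register and advances the PC.
                 */
                device_mmio(run->mmio.phys_addr, run->mmio.data,
                            run->mmio.len, run->mmio.is_write);
        }
}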