Linux Kernel 3.7.1
mmio.c
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 * Yaozu Dong (Eddie Dong) ([email protected])
 * Kun Tian (Kevin Tian) ([email protected])
 *
 * Copyright (c) 2007 Intel Corporation KVM support.
 * Xuefei Xu (Anthony Xu) ([email protected])
 * Xiantao Zhang ([email protected])
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kvm_host.h>

#include "vcpu.h"

static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}

/*
 * LSAPIC OFFSET
 */
#define PIB_LOW_HALF(ofst)	!(ofst & (1 << 20))
#define PIB_OFST_INTA		0x1E0000
#define PIB_OFST_XTP		0x1E0008
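
/*
 * Illustrative note (inferred from the bit-20 split, not stated in
 * this file): the Processor Interrupt Block is divided by bit 20 of
 * the offset.  Offsets with bit 20 clear form the low half, which is
 * the IPI delivery window; the upper half carries the INTA and XTP
 * registers.  For example, PIB_LOW_HALF(PIB_OFST_XTP) is false
 * because 0x1E0008 has bit 20 set, so XTP accesses take the register
 * path rather than the IPI path in lsapic_write()/lsapic_read().
 */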

/*
 * Execute a write-IPI operation.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
		uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;
	vmm_transition(current_vcpu);

	local_irq_restore(psr);
}
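
/*
 * Illustrative note (an assumption about the ia64 SAPIC convention,
 * not defined in this file): an IPI is raised by an 8-byte store into
 * the low half of the target processor's interrupt block; the store
 * address selects the target (via its LID id/eid fields) and the
 * stored data supplies the vector.  vlsapic_write_ipi() itself only
 * records addr/data in exit_data and transitions to the host VMM,
 * which performs the actual delivery.
 */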

void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length, unsigned long val)
{
	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		panic_vm(v, "Undefined write on PIB INTA\n");
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			vlsapic_write_xtp(v, val);
		} else {
			panic_vm(v, "Undefined write on PIB XTP\n");
		}
		break;
	default:
		if (PIB_LOW_HALF(addr)) {
			/* Lower half */
			if (length != 8)
				panic_vm(v, "Can't LHF write with size %ld!\n",
						length);
			else
				vlsapic_write_ipi(v, addr, val);
		} else {
			/* Upper half */
			panic_vm(v, "IPI-UHF write %lx\n", addr);
		}
		break;
	}
}

unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length)
{
	uint64_t result = 0;

	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		if (length == 1)
			; /* 1-byte load: there is no i8259, so no INTA access */
		else
			panic_vm(v, "Undefined read on PIB INTA\n");
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			result = VLSAPIC_XTP(v);
		} else {
			panic_vm(v, "Undefined read on PIB XTP\n");
		}
		break;
	default:
		panic_vm(v, "Undefined addr access for lsapic!\n");
		break;
	}
	return result;
}
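
/*
 * Illustrative example (not from the original source): a one-byte
 * guest load from the XTP register returns the current external task
 * priority,
 *
 *	uint8_t xtp = lsapic_read(v, PIB_OFST_XTP, 1);
 *
 * while any other access size to that offset panics the VM.
 */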

static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
		u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/* Intercept accesses to the PIB range */
	if (iot == GPFN_PIB) {
		if (!dir)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}

	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			/* ensure the loaded value is zero-extended */
			*dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));
	} else
		panic_vm(vcpu, "Unhandled mmio access returned!\n");
out:
	local_irq_restore(psr);
	return;
}
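
/*
 * Worked example for the zero-extension above (illustrative): for a
 * two-byte read, s == 2, so the mask is ~0UL >> (64 - 16) == 0xffff;
 * a response value of 0xdeadbeef is therefore stored as 0xbeef, i.e.
 * the 16-bit result zero-extended to 64 bits.
 */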

/*
 * dir 1: read, 0: write
 * inst_type 0: integer, 1: floating point
 */
#define SL_INTEGER	0	/* store/load integer */
#define SL_FLOATING	1	/* store/load floating point */

void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);

	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if the code fetch fails, return and retry */
		return;
	}
	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;
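
	/*
	 * Illustrative note (assumes the standard ia64 bundle layout):
	 * a bundle is 128 bits, a 5-bit template plus three 41-bit
	 * slots.  Slot 1 straddles the two 64-bit words, so its low 18
	 * bits (slot1a) and high 23 bits (slot1b) are stitched back
	 * together above as slot1a + (slot1b << 18).
	 */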

	/* Integer Load/Store */
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);
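		/*
		 * Illustrative note (inferred from this function, not a
		 * documented contract): the low two bits of x6 encode
		 * log2 of the access size (0 -> 1 byte ... 3 -> 8
		 * bytes), expanded later by size = 1 << size, while
		 * x6 >> 2 above 0xb selects the store forms and below
		 * 0xb the load forms.
		 */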
		if ((inst.M1.x6 >> 2) > 0xb) {
			/* write */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/* read */
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/* Integer Load/Store + Imm update */
		inst_type = SL_INTEGER;
		size = (inst.M3.x6 & 0x3);
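		/*
		 * Worked example (illustrative): both branches below
		 * pack the 9-bit immediate s:i:imm7 into bits 31..23 of
		 * a signed 32-bit value so the arithmetic right shift
		 * by 23 sign-extends it.  E.g. s=1, i=0, imm7=0 packs
		 * to 0x80000000, which shifts to -256, the
		 * sign-extended value of the 9-bit immediate 0x100.
		 */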
		if ((inst.M5.x6 >> 2) > 0xb) {
			/* write */
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/* read */
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
			&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/* Write the high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
				ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write the high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
				8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0];	/* significand */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/* lfetch forms: only the base register is updated */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm update (ldfp8, M12) */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;	/* ldfd */
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
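		/*
		 * Illustrative note (an assumption about the ia64
		 * register-file format): 0x1003E is the biased exponent
		 * used to hold a 64-bit integer in the 82-bit FP
		 * register format, so the doubleword just read lands in
		 * the significand unchanged, mirroring what a hardware
		 * ldf8 would do.
		 */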
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu, "Unsupported MMIO access instruction! "
				"Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
				bundle.i64[0], bundle.i64[1]);
	}

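	/* size currently holds log2 of the access width (from x6); convert to bytes */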
	size = 1 << size;
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu, "Unsupported instruction type!\n");
	}
	vcpu_increment_iip(vcpu);
}