35 #include <linux/hrtimer.h>
37 #include <linux/slab.h>
38 #include <asm/processor.h>
40 #include <asm/current.h>
/*
 * Debug printout helper. NOTE(review): in the full source these two
 * definitions live in the two arms of a preprocessor conditional
 * (debug build -> printk, otherwise no-op); the #ifdef/#else lines
 * (original lines 47/49/51) are missing from this extract, so as shown
 * the second #define would redefine the first. Restore the guard when
 * reconstructing the file.
 */
48 #define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
/* Release-build variant: expands to nothing. */
50 #define ioapic_debug(fmt, arg...)
/*
 * Forward declaration: deliver the interrupt programmed in redirection
 * table entry @irq of @vioapic. Defined later in this file (original
 * line 163). Return semantics are defined at the definition site —
 * presumably the number of vCPUs the interrupt was injected into;
 * confirm against the full source.
 */
52 static int ioapic_deliver(
struct kvm_ioapic *vioapic,
int irq);
/*
 * ioapic_read_indirect() - read the register currently selected via
 * IOREGSEL, returned through the IOWIN data window.
 * NOTE(review): fragment — the parameter list is truncated and the
 * switch on the selected register (original lines 55-79) is missing.
 */
54 static unsigned long ioapic_read_indirect(
struct kvm_ioapic *ioapic,
/* IOAPIC ID register: the 4-bit id sits in bits 27:24 of the result. */
68 result = ((ioapic->
id & 0xf) << 24);
/* Redirection-table read: a 64-bit entry is exposed as two 32-bit
 * halves; select high or low word (selector's bit 0, presumably —
 * the condition line is missing from this extract). */
80 (redir_content >> 32) & 0xffffffff :
81 redir_content & 0xffffffff;
/*
 * ioapic_service() - fire the interrupt pending on redirection-table
 * entry @idx by handing it to ioapic_deliver().
 * NOTE(review): fragment — interior lines (original 90-96, 98, 100-104)
 * are missing; this is not a complete body.
 */
89 static int ioapic_service(
struct kvm_ioapic *ioapic,
unsigned int idx)
97 injected = ioapic_deliver(ioapic, idx);
/* Latch remote_irr on the entry — per the I/OAPIC spec this marks a
 * level-triggered interrupt as in service until the guest EOIs it.
 * The guard selecting level-triggered mode (original line 98) is
 * missing here — verify against the full source. */
99 pent->
fields.remote_irr = 1;
/*
 * update_handled_vectors() - rebuild the bitmap of interrupt vectors
 * this IOAPIC can currently raise, from the redirection table.
 * Readers (see kvm_ioapic_handles_vector) test this bitmap to decide
 * whether an EOI concerns us.
 * NOTE(review): fragment — the loop that sets bits from each pin's
 * programmed vector (original lines 106-112) is missing.
 */
105 static void update_handled_vectors(
struct kvm_ioapic *ioapic)
/* Start from an all-clear local bitmap... */
110 memset(handled_vectors, 0,
sizeof(handled_vectors));
/* ...then publish the recomputed bitmap in one copy. */
113 memcpy(ioapic->handled_vectors, handled_vectors,
114 sizeof(handled_vectors));
/*
 * NOTE(review): fragment of the indirect-write handler (presumably
 * ioapic_write_indirect — the signature line is missing from this
 * extract). Writes go to the register selected via IOREGSEL: either
 * the IOAPIC ID or one half of a 64-bit redirection-table entry.
 */
121 bool mask_before, mask_after;
/* IOAPIC ID register write: only the 4-bit id field (val bits 27:24). */
130 ioapic->
id = (val >> 24) & 0xf;
/* Redirection-table write: registers 0x10.. map two 32-bit words per
 * entry, so the pin index is (ioregsel - 0x10) / 2. */
137 index = (ioapic->
ioregsel - 0x10) >> 1;
139 ioapic_debug(
"change redir index %x val %x\n", index, val);
/* Bounds-check the pin index before touching the table. */
140 if (index >= IOAPIC_NUM_PINS)
/* Remember the mask bit so we can detect an unmask below. */
143 mask_before = e->
fields.mask;
/* Writing the HIGH word: keep the low 32 bits, replace the high ones
 * (the OR-in of the new value, original line 146, is missing here). */
145 e->
bits &= 0xffffffff;
/* Writing the LOW word: keep the high 32 bits, replace the low ones. */
148 e->
bits &= ~0xffffffffULL;
/* The programmed vector may have changed — recompute handled_vectors. */
152 update_handled_vectors(ioapic);
153 mask_after = e->
fields.mask;
/* If the entry was just unmasked (or mask otherwise changed)...
 * NOTE(review): the notifier call on mask change (original lines
 * 155-156) is missing from this extract. */
154 if (mask_before != mask_after)
/* ...and the pin has a pending request latched in irr, service it now. */
157 && ioapic->
irr & (1 << index))
158 ioapic_service(ioapic, index);
/*
 * ioapic_deliver() - translate redirection-table entry @irq into a
 * kvm_lapic_irq and inject it (injection call, original lines after
 * 188, missing from this extract).
 * NOTE(review): fragment — interior lines are missing throughout.
 */
163 static int ioapic_deliver(
struct kvm_ioapic *ioapic,
int irq)
/* Tail of a debug format string; its leading part (original lines
 * 165-168) is missing here. */
169 "vector=%x trig_mode=%x\n",
/* Copy the entry's routing fields into the generic irq descriptor. */
174 irqe.dest_id = entry->
fields.dest_id;
175 irqe.vector = entry->
fields.vector;
176 irqe.dest_mode = entry->
fields.dest_mode;
177 irqe.trig_mode = entry->
fields.trig_mode;
/* delivery_mode is shifted left by 8 — presumably to match the ICR
 * bit layout expected by the LAPIC delivery code; confirm against
 * the irq delivery path in the full source. */
178 irqe.delivery_mode = entry->
fields.delivery_mode << 8;
/* Special case (guard lines 179-187 missing): redirect to the boot
 * CPU's vcpu id. */
188 irqe.dest_id = ioapic->
kvm->bsp_vcpu_id;
/*
 * NOTE(review): fragment of the pin-assert entry point (presumably
 * kvm_ioapic_set_irq — its signature, original lines before 202, is
 * missing). Latches/clears the pin in irr under ioapic->lock and
 * services it when appropriate.
 */
202 BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
/* All irr/redirection-table state is protected by ioapic->lock. */
204 spin_lock(&ioapic->
lock);
/* Snapshot irr so edge-triggered delivery below can detect a 0->1
 * transition. */
205 old_irr = ioapic->
irr;
/* Fold this source's level into the per-pin shared-irq line state. */
206 irq_level = __kvm_irq_line_state(&ioapic->
irq_states[irq],
207 irq_source_id, level);
/* Honor the entry's polarity bit (active-low inverts the level). */
209 irq_level ^= entry.
fields.polarity;
/* De-assert path: clear the pin's bit in irr. */
211 ioapic->
irr &= ~mask;
/* Assert path: deliver on an edge transition, or for level-triggered
 * pins whenever remote_irr is not already set (interrupt not yet in
 * service). */
216 if ((edge && old_irr != ioapic->
irr) ||
217 (!edge && !entry.
fields.remote_irr))
218 ret = ioapic_service(ioapic, irq);
/* Tracepoint: ret == 0 here presumably means "coalesced"; confirm
 * the convention against the full source. */
222 trace_kvm_ioapic_set_irq(entry.
bits, irq, ret == 0);
223 spin_unlock(&ioapic->
lock);
/*
 * NOTE(review): fragment — only the lock/unlock pair of this function
 * survives in the extract (body, original lines 233-234, missing).
 * Position in the file suggests the "clear all pin state for a given
 * irq source" helper; confirm the identity against the full source.
 */
232 spin_lock(&ioapic->
lock);
235 spin_unlock(&ioapic->
lock);
/*
 * NOTE(review): fragment of the EOI scan (presumably
 * __kvm_ioapic_update_eoi, called with ioapic->lock held — see the
 * locked wrapper at original lines 282-284). Walks the pins looking
 * for entries whose vector matches the EOI'd @vector.
 */
246 if (ent->
fields.vector != vector)
/* The lock is dropped and re-taken around a callout here (original
 * line 258 is missing) — the interval where state can change must be
 * handled by the missing code; do not assume irr/entry are unchanged
 * across this gap. */
257 spin_unlock(&ioapic->
lock);
259 spin_lock(&ioapic->
lock);
/* EOI received: the level-triggered interrupt is no longer in
 * service, so clear remote_irr... */
265 ent->
fields.remote_irr = 0;
/* ...and if the pin is unmasked with a request still pending in irr,
 * re-deliver it immediately. */
266 if (!ent->
fields.mask && (ioapic->
irr & (1 << i)))
267 ioapic_service(ioapic, i);
/* NOTE(review): fragment — lone return of the "does this IOAPIC route
 * this vector?" predicate; tests the bitmap maintained by
 * update_handled_vectors(). Signature (original lines before 275) is
 * missing from this extract. */
275 return test_bit(vector, ioapic->handled_vectors);
/*
 * NOTE(review): fragment — locked wrapper around the EOI scan: takes
 * ioapic->lock and delegates to __kvm_ioapic_update_eoi(). The
 * signature and any fast-path check (original lines before 282) are
 * missing from this extract.
 */
282 spin_lock(&ioapic->
lock);
283 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
284 spin_unlock(&ioapic->
lock);
/*
 * ioapic_in_range() - predicate: does guest-physical address @addr
 * fall inside this IOAPIC's MMIO window? Used by the MMIO read/write
 * handlers below to reject addresses they don't own.
 * NOTE(review): fragment — the body (original lines 293+) is missing.
 */
292 static inline int ioapic_in_range(
struct kvm_ioapic *ioapic,
gpa_t addr)
/*
 * NOTE(review): fragment of the MMIO read handler (presumably
 * ioapic_mmio_read — signature missing). Decodes a guest read of the
 * IOAPIC window under ioapic->lock and copies the result back in the
 * access width the guest used.
 */
303 if (!ioapic_in_range(ioapic, addr))
310 spin_lock(&ioapic->
lock);
/* IOWIN read: fetch the register selected via IOREGSEL. */
317 result = ioapic_read_indirect(ioapic, addr, len);
324 spin_unlock(&ioapic->
lock);
/* 8-byte access: store the full result directly... */
328 *(
u64 *) val = result;
/* ...otherwise copy only the bytes the access asked for. The
 * endianness assumption here matches x86 (low bytes first) —
 * NOTE(review): confirm if this file is built for other arches. */
333 memcpy(val, (
char *)&result, len);
/*
 * NOTE(review): fragment of the MMIO write handler (presumably
 * ioapic_mmio_write — signature missing). Validates the address,
 * extracts the written data (width-decode lines, original 351-369,
 * missing) and applies it under ioapic->lock.
 */
346 if (!ioapic_in_range(ioapic, addr))
349 ioapic_debug(
"ioapic_mmio_write addr=%p len=%d val=%p\n",
350 (
void*)addr, len, val);
370 spin_lock(&ioapic->
lock);
/* IOWIN write: route the data to the register selected via IOREGSEL. */
377 ioapic_write_indirect(ioapic, data);
388 spin_unlock(&ioapic->
lock);
/* NOTE(review): fragment — single surviving line of the reset path
 * (presumably kvm_ioapic_reset): after the register state is cleared
 * (missing lines), the handled-vectors bitmap is recomputed so it
 * matches the reset redirection table. */
402 update_handled_vectors(ioapic);
/* NOTE(review): fragment of the kvm_io_device ops table wiring the
 * MMIO handlers above into the KVM I/O bus (the surrounding
 * `static const struct kvm_io_device_ops ioapic_mmio_ops = {` / `};`
 * lines are missing from this extract). */
406 .read = ioapic_mmio_read,
407 .write = ioapic_mmio_write,
/*
 * NOTE(review): fragment of the init path (presumably
 * kvm_ioapic_init — allocation, lock init and bus registration lines
 * are missing). Publishes the device on the VM and hooks up its MMIO
 * ops.
 */
419 kvm->
arch.vioapic = ioapic;
/* Register the MMIO handler table for this device. */
421 kvm_iodevice_init(&ioapic->
dev, &ioapic_mmio_ops);
/*
 * NOTE(review): fragment — looks like the state *getter* used by the
 * KVM_GET_IRQCHIP ioctl path: snapshots IOAPIC register state under
 * the lock (the copy itself, original line 453, is missing). Confirm
 * the function identity against the full source.
 */
448 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
452 spin_lock(&ioapic->
lock);
454 spin_unlock(&ioapic->
lock);
/*
 * NOTE(review): fragment — looks like the state *setter* used by the
 * KVM_SET_IRQCHIP ioctl path: installs new register state under the
 * lock (the copy itself, original line 465, is missing), then
 * recomputes the handled-vectors bitmap since the redirection table
 * may have changed wholesale.
 */
460 struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
464 spin_lock(&ioapic->
lock);
466 update_handled_vectors(ioapic);
467 spin_unlock(&ioapic->
lock);