#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"
static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}
static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* (addr, len) must lie entirely inside the registered zone */
	if (addr + len < addr)
		return 0;	/* reject address wrap-around */
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/*
	 * last is the first free entry; one slot is always kept unused,
	 * so first == last means the ring is empty, never full.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	return avail != 0;
}
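
/*
 * For reference, the ring shared with userspace is laid out roughly as
 * below (a sketch of the uapi definitions in <linux/kvm.h> for kernels
 * of this vintage; consult the matching header for the exact form):
 *
 *	struct kvm_coalesced_mmio {
 *		__u64 phys_addr;
 *		__u32 len;
 *		__u32 pad;
 *		__u8  data[8];
 *	};
 *
 *	struct kvm_coalesced_mmio_ring {
 *		__u32 first, last;
 *		struct kvm_coalesced_mmio coalesced_mmio[0];
 *	};
 *
 * KVM_COALESCED_MMIO_MAX is the number of entries that fit in the one
 * page backing the ring after the first/last header.
 */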
static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);
	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the write into the first free entry of the ring */
	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();	/* publish the entry before advancing last */
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);
	kfree(dev);
}
/* Only writes are coalesced; reads still exit to userspace as usual. */
static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * ring_lock serializes writers into the ring; the zone list needs
	 * no lock of its own, as (un)registration runs under slots_lock.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}
void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (!ret)
		list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		kfree(dev);
	return ret;
}
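
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a VMM registers a write-heavy guest-physical range, e.g. a
 * framebuffer, with the KVM_REGISTER_COALESCED_MMIO vm ioctl.  The
 * vm_fd value and the addresses below are hypothetical.
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xf0000000,	// hypothetical framebuffer base
 *		.size = 0x100000,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */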
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	/* drop every device whose zone falls inside the requested range */
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
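
/*
 * Illustrative consumer (a sketch, not part of this file): userspace
 * maps the ring page KVM_COALESCED_MMIO_PAGE_OFFSET pages into the vcpu
 * mmap region and drains it after returning from KVM_RUN.  Only 'first'
 * is advanced by the consumer; handle_mmio_write() is a hypothetical
 * VMM helper.
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_mmio_write(ent->phys_addr, ent->data, ent->len);
 *		__sync_synchronize();	// consume entry before advancing
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */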