#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/events.h>
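/* The state-name table below is the lookup used by xenbus_strstate(): */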
	static const char *const name[] = {
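/*
 * The callback signatures below belong to the watch registration helpers
 * xenbus_watch_path() and xenbus_watch_pathfmt(); a watch callback gets
 * the vector of xenstore path components that fired and its length.
 */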
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);
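/*
 * In __xenbus_switch_state(), the new state is written to the device's
 * xenstore node inside a transaction; a request for the current state
 * is a no-op.
 */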
	if (state == dev->state)
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		if (err == -EAGAIN && !abort)
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	return __xenbus_switch_state(dev, state, 0);
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
	char *printf_buffer = NULL;
	char *path_buffer = NULL;
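	/* The message is rendered as "<-err> <formatted text>" and is also
	 * written to the device's error node in xenstore. */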
#define PRINTF_BUFFER_SIZE 4096
	if (printf_buffer == NULL)
	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
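	/* error_path() builds the path of the device's error node in
	 * xenstore; it returns NULL if the allocation fails. */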
	path_buffer = error_path(dev);
	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);
	kfree(printf_buffer);
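/*
 * xenbus_dev_error() and xenbus_dev_fatal() are thin va_list wrappers
 * around xenbus_va_dev_error(); the fatal variant also switches the
 * device to Closing.
 */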
	xenbus_va_dev_error(dev, err, fmt, ap);
	xenbus_va_dev_error(dev, err, fmt, ap);
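/*
 * Equivalent to xenbus_dev_fatal(), but the depth argument stops the
 * error handling from recursing: only the outermost call (depth 0)
 * attempts the switch to Closing.
 */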
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
	xenbus_va_dev_error(dev, err, fmt, ap);
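/*
 * In xenbus_alloc_evtchn(), a successful EVTCHNOP_alloc_unbound call
 * fills in the newly allocated local port:
 */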
		*port = alloc_unbound.port;
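/* Error text from xenbus_bind_evtchn() when binding the remote port fails: */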
418 "binding to event channel %d from domain %d",
	return ring_ops->map(dev, gnt_ref, vaddr);
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     int gnt_ref, void **vaddr)
499 "mapping in shared page %d from domain %d",
	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);
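/*
 * HVM variant: grants cannot be mapped straight into an HVM guest's page
 * tables, so the shared page is mapped into a ballooned-out page instead.
 */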
static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      int gnt_ref, void **vaddr)
	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);
578 "mapping in shared page %d from domain %d",
	return ring_ops->unmap(dev, vaddr);
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
	spin_lock(&xenbus_valloc_lock);
		if (node->area->addr == vaddr) {
	spin_unlock(&xenbus_valloc_lock);
627 "can't find mapped virtual address %p", vaddr);
642 "unmapping page at handle %d error %d",
static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
	spin_lock(&xenbus_valloc_lock);
	spin_unlock(&xenbus_valloc_lock);
669 "can't find mapped virtual address %p", vaddr);
	WARN(1, "Leaking %p\n", vaddr);
	gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
706 "unmapping page at handle %d error %d",
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
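/*
 * Presumably run from the xenbus init path: PV domains can map grants
 * through their page tables, HVM domains must use ballooned pages.
 */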
		ring_ops = &ring_ops_pv;
		ring_ops = &ring_ops_hvm;