xenbus_client.c
/******************************************************************************
 * Client-facing interface for the Xenbus driver. In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>

#include "xenbus_probe.h"

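/*
 * Bookkeeping for a ring page mapped on behalf of a device: PV mappings
 * remember the vm_struct backing the virtual address, HVM mappings the
 * ballooned page, plus the grant handle needed to unmap it again.
 */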
struct xenbus_map_node {
        struct list_head next;
        union {
                struct vm_struct *area; /* PV */
                struct page *page;      /* HVM */
        };
        grant_handle_t handle;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

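/*
 * PV and HVM domains map granted ring pages differently; the active
 * implementation is selected once at init time by xenbus_ring_ops_init().
 */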
struct xenbus_ring_ops {
        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

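/* Return a human-readable name for the given xenbus state. */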
const char *xenbus_strstate(enum xenbus_state state)
{
        static const char *const name[] = {
                [ XenbusStateUnknown      ] = "Unknown",
                [ XenbusStateInitialising ] = "Initialising",
                [ XenbusStateInitWait     ] = "InitWait",
                [ XenbusStateInitialised  ] = "Initialised",
                [ XenbusStateConnected    ] = "Connected",
                [ XenbusStateClosing      ] = "Closing",
                [ XenbusStateClosed       ] = "Closed",
                [XenbusStateReconfiguring] = "Reconfiguring",
                [XenbusStateReconfigured] = "Reconfigured",
        };
        return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

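/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a watch on the given path, using the given xenbus_watch structure
 * for storage, and the given callback function as the callback.  Returns 0 on
 * success or -errno on error.  On success @path is saved as @watch->node and
 * remains the caller's to free; on error @watch->node is NULL, the device is
 * switched to XenbusStateClosing and the error is saved in the store.
 */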
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
                      struct xenbus_watch *watch,
                      void (*callback)(struct xenbus_watch *,
                                       const char **, unsigned int))
{
        int err;

        watch->node = path;
        watch->callback = callback;

        err = register_xenbus_watch(watch);

        if (err) {
                watch->node = NULL;
                watch->callback = NULL;
                xenbus_dev_fatal(dev, err, "adding watch on %s", path);
        }

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);

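/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given callback function as the
 * callback.  Returns 0 on success or -errno on error.  On success the
 * allocated path is saved as @watch->node and becomes the caller's to
 * kfree(); on error it is freed here, the device is switched to
 * XenbusStateClosing and the error is saved in the store.
 */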
int xenbus_watch_pathfmt(struct xenbus_device *dev,
                         struct xenbus_watch *watch,
                         void (*callback)(struct xenbus_watch *,
                                          const char **, unsigned int),
                         const char *pathfmt, ...)
{
        int err;
        va_list ap;
        char *path;

        va_start(ap, pathfmt);
        path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
        va_end(ap);

        if (!path) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
                return -ENOMEM;
        }
        err = xenbus_watch_path(dev, path, watch, callback);

        if (err)
                kfree(path);
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
                                const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
                      enum xenbus_state state, int depth)
{
        /* We check whether the state is currently set to the given value, and
           if not, then the state is set.  We don't want to unconditionally
           write the given state, because we don't want to fire watches
           unnecessarily.  Furthermore, if the node has gone, we don't write
           to it, as the device will be tearing down, and we don't want to
           resurrect that directory.

           Note that, because of this cached value of our state, this
           function will not take a caller's Xenstore transaction
           (something it was trying to in the past) because dev->state
           would not get reset if the transaction was aborted.
         */

        struct xenbus_transaction xbt;
        int current_state;
        int err, abort;

        if (state == dev->state)
                return 0;

again:
        abort = 1;

        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "starting transaction");
                return 0;
        }

        err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
        if (err != 1)
                goto abort;

        err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
        if (err) {
                xenbus_switch_fatal(dev, depth, err, "writing new state");
                goto abort;
        }

        abort = 0;
abort:
        err = xenbus_transaction_end(xbt, abort);
        if (err) {
                if (err == -EAGAIN && !abort)
                        goto again;
                xenbus_switch_fatal(dev, depth, err, "ending transaction");
        } else
                dev->state = state;

        return 0;
}

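/**
 * xenbus_switch_state - advertise a driver state change in the store
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new state.
 * Returns 0; on failure the error is reported through xenbus_switch_fatal(),
 * which saves it in the store and moves the device towards
 * XenbusStateClosing.
 */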
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
        return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);

int xenbus_frontend_closed(struct xenbus_device *dev)
{
        xenbus_switch_state(dev, XenbusStateClosed);
        complete(&dev->down);
        return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

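/*
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */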
static char *error_path(struct xenbus_device *dev)
{
        return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
                                const char *fmt, va_list ap)
{
        int ret;
        unsigned int len;
        char *printf_buffer = NULL;
        char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
        printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
        if (printf_buffer == NULL)
                goto fail;

        len = sprintf(printf_buffer, "%i ", -err);
        ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

        BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

        dev_err(&dev->dev, "%s\n", printf_buffer);

        path_buffer = error_path(dev);

        if (path_buffer == NULL) {
                dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

        if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
                dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
                        dev->nodename, printf_buffer);
                goto fail;
        }

fail:
        kfree(printf_buffer);
        kfree(path_buffer);
}

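/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */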
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

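/**
 * xenbus_dev_fatal - put an error message into the store and shut down
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */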
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

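/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */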
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
                                const char *fmt, ...)
{
        va_list ap;

        va_start(ap, fmt);
        xenbus_va_dev_error(dev, err, fmt, ap);
        va_end(ap);

        if (!depth)
                __xenbus_switch_state(dev, XenbusStateClosing, 1);
}

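/**
 * xenbus_grant_ring - grant the peer access to a ring page
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant
 *
 * Grant access to the given @ring_mfn to the peer of the given device.
 * Returns a grant reference on success, or -errno on error.  On error, the
 * device will switch to XenbusStateClosing, and the error will be saved in
 * the store.
 */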
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
        int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
        if (err < 0)
                xenbus_dev_fatal(dev, err, "granting access to ring page");
        return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);

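/*
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */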
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err;

        alloc_unbound.dom = DOMID_SELF;
        alloc_unbound.remote_dom = dev->otherend_id;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err)
                xenbus_dev_fatal(dev, err, "allocating event channel");
        else
                *port = alloc_unbound.port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);

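/*
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */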
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom = dev->otherend_id;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);
        if (err)
                xenbus_dev_fatal(dev, err,
                                 "binding to event channel %d from domain %d",
                                 remote_port, dev->otherend_id);
        else
                *port = bind_interdomain.local_port;

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);

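/*
 * Free an existing event channel.  Returns 0 on success, or -errno on error.
 */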
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
        struct evtchn_close close;
        int err;

        close.port = port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        if (err)
                xenbus_dev_error(dev, err, "freeing event channel %d", port);

        return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

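/**
 * xenbus_map_ring_valloc - allocate and map a page shared by another domain
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */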
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
        return ring_ops->map(dev, gnt_ref, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);

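/*
 * Illustrative sketch (not part of this file): a backend driver that has read
 * a grant reference "ring_ref" from its frontend's XenStore directory might
 * map and later unmap the shared ring roughly like this.  The variable names
 * are hypothetical; the helpers below already report failures through
 * xenbus_dev_fatal().
 *
 *	void *ring;
 *	int err;
 *
 *	err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
 *	if (err)
 *		return err;
 *	...
 *	xenbus_unmap_ring_vfree(dev, ring);
 */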
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
                                     int gnt_ref, void **vaddr)
{
        struct gnttab_map_grant_ref op = {
                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
                .ref   = gnt_ref,
                .dom   = dev->otherend_id,
        };
        struct xenbus_map_node *node;
        struct vm_struct *area;
        pte_t *pte;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        area = alloc_vm_area(PAGE_SIZE, &pte);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }

        op.host_addr = arbitrary_virt_to_machine(pte).maddr;

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay) {
                free_vm_area(area);
                kfree(node);
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
                return op.status;
        }

        node->handle = op.handle;
        node->area = area;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = area->addr;
        return 0;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
                                      int gnt_ref, void **vaddr)
{
        struct xenbus_map_node *node;
        int err;
        void *addr;

        *vaddr = NULL;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
        if (err)
                goto out_err;

        addr = pfn_to_kaddr(page_to_pfn(node->page));

        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
        if (err)
                goto out_err;

        spin_lock(&xenbus_valloc_lock);
        list_add(&node->next, &xenbus_valloc_pages);
        spin_unlock(&xenbus_valloc_lock);

        *vaddr = addr;
        return 0;

 out_err:
        free_xenballooned_pages(1, &node->page);
        kfree(node);
        return err;
}

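/**
 * xenbus_map_ring - map a page shared by another domain at a given address
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * on error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */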
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
                    grant_handle_t *handle, void *vaddr)
{
        struct gnttab_map_grant_ref op;

        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
                          dev->otherend_id);

        gnttab_batch_map(&op, 1);

        if (op.status != GNTST_okay) {
                xenbus_dev_fatal(dev, op.status,
                                 "mapping in shared page %d from domain %d",
                                 gnt_ref, dev->otherend_id);
        } else
                *handle = op.handle;

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);

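/**
 * xenbus_unmap_ring_vfree - unmap a page mapped with xenbus_map_ring_valloc
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain, and free the virtual address space allocated for it by
 * xenbus_map_ring_valloc.  Returns 0 on success or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */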
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
        return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref op = {
                .host_addr = (unsigned long)vaddr,
        };
        unsigned int level;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                if (node->area->addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        op.handle = node->handle;
        op.host_addr = arbitrary_virt_to_machine(
                lookup_address((unsigned long)vaddr, &level)).maddr;

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status == GNTST_okay)
                free_vm_area(node->area);
        else
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 node->handle, op.status);

        kfree(node);
        return op.status;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
        int rv;
        struct xenbus_map_node *node;
        void *addr;

        spin_lock(&xenbus_valloc_lock);
        list_for_each_entry(node, &xenbus_valloc_pages, next) {
                addr = pfn_to_kaddr(page_to_pfn(node->page));
                if (addr == vaddr) {
                        list_del(&node->next);
                        goto found;
                }
        }
        node = addr = NULL;
 found:
        spin_unlock(&xenbus_valloc_lock);

        if (!node) {
                xenbus_dev_error(dev, -ENOENT,
                                 "can't find mapped virtual address %p", vaddr);
                return GNTST_bad_virt_addr;
        }

        rv = xenbus_unmap_ring(dev, node->handle, addr);

        if (!rv)
                free_xenballooned_pages(1, &node->page);
        else
                WARN(1, "Leaking %p\n", vaddr);

        kfree(node);
        return rv;
}

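/**
 * xenbus_unmap_ring - unmap a page mapped with xenbus_map_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another
 * domain.  Returns 0 on success or GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */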
int xenbus_unmap_ring(struct xenbus_device *dev,
                      grant_handle_t handle, void *vaddr)
{
        struct gnttab_unmap_grant_ref op;

        gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);

        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
                BUG();

        if (op.status != GNTST_okay)
                xenbus_dev_error(dev, op.status,
                                 "unmapping page at handle %d error %d",
                                 handle, op.status);

        return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);

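/**
 * xenbus_read_driver_state - read state from a store path
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */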
enum xenbus_state xenbus_read_driver_state(const char *path)
{
        enum xenbus_state result;
        int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
        if (err)
                result = XenbusStateUnknown;

        return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);

static const struct xenbus_ring_ops ring_ops_pv = {
        .map = xenbus_map_ring_valloc_pv,
        .unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
        .map = xenbus_map_ring_valloc_hvm,
        .unmap = xenbus_unmap_ring_vfree_hvm,
};

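/* Select the PV or HVM ring mapping implementation for this domain. */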
void __init xenbus_ring_ops_init(void)
{
        if (xen_pv_domain())
                ring_ops = &ring_ops_pv;
        else
                ring_ops = &ring_ops_hvm;
}