Linux Kernel 3.7.1
virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
	do { \
		dev_err(&(_vq)->vq.vdev->dev, \
			"%s:"fmt, (_vq)->vq.name, ##args); \
		BUG(); \
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq) \
	do { \
		if ((_vq)->in_use) \
			panic("%s:in_use = %i\n", \
			      (_vq)->vq.name, (_vq)->in_use); \
		(_vq)->in_use = __LINE__; \
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...) \
	do { \
		dev_err(&_vq->vq.vdev->dev, \
			"%s:"fmt, (_vq)->vq.name, ##args); \
		(_vq)->broken = true; \
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

	/* Index of the queue */
	int queue_index;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};
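
/* Free descriptors are chained through desc[].next starting at free_head;
 * data[] holds one caller token per descriptor head, so a completed id read
 * from the used ring can be mapped back to the buffer the driver submitted. */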

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

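/* With VIRTIO_RING_F_INDIRECT_DESC, a single ring descriptor can point at a
 * separately allocated table of descriptors, so a multi-element request
 * consumes only one slot in the ring proper. */
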
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}


int virtqueue_get_queue_index(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	return vq->queue_index;
}
EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);

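/*
 * virtqueue_add_buf - expose a buffer to the other end
 * @sg holds @out readable entries followed by @in writable ones; @data is the
 * caller's token, returned later by virtqueue_get_buf().  The caller must
 * serialize operations on the same virtqueue.  Returns the remaining queue
 * capacity, or a negative errno on failure.
 */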
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

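/*
 * virtqueue_kick_prepare - first half of a split kick
 * Updates internal state and returns true if the other side needs to be
 * notified; the caller then invokes virtqueue_notify(), possibly outside any
 * lock it holds.  virtqueue_kick() below combines the two steps.
 */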
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

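/*
 * virtqueue_kick - notify the other side after adding buffers
 * Convenience wrapper: notify only if virtqueue_kick_prepare() says the host
 * has not suppressed notifications.
 */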
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

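/*
 * virtqueue_get_buf - get the next used buffer
 * Returns the token passed to virtqueue_add_buf() for the next buffer the
 * other side has finished with, and stores its written length in *len;
 * returns NULL if nothing is pending or the ring is broken.
 */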
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

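/*
 * virtqueue_disable_cb - hint that callbacks are not wanted
 * This is only an optimization hint to the other side; callbacks may still
 * arrive, and the call is not synchronized against them.
 */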
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

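/*
 * virtqueue_enable_cb - restart callbacks after disable_cb
 * Re-enables callbacks and returns false if more used buffers are already
 * pending, in which case the caller should process them rather than wait
 * for an interrupt.
 */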
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

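/*
 * virtqueue_enable_cb_delayed - restart callbacks, but lazily
 * Like virtqueue_enable_cb(), but asks the other side to defer the interrupt
 * until roughly 3/4 of the currently outstanding buffers have been used,
 * reducing interrupt load when completions come in batches.
 */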
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

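/*
 * virtqueue_detach_unused_buf - return a buffer the other side never used
 * Returns the token of a still-queued buffer, or NULL when none remain;
 * intended for freeing outstanding buffers during device shutdown.
 */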
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

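/*
 * vring_interrupt - the transport's interrupt handler for a virtqueue
 * Returns IRQ_NONE if there is no pending work; otherwise runs the
 * virtqueue's callback (if any) and returns IRQ_HANDLED.
 */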
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

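/*
 * vring_new_virtqueue - wrap caller-allocated ring memory in a virtqueue
 * @num must be a power of two, and @pages must be laid out and aligned as
 * vring_init() expects for the given @vring_align.
 */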
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->queue_index = index;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

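/*
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * Returns the number of descriptors the ring was created with, which is also
 * the maximum number of descriptors that can ever be outstanding.
 */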
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{

	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

MODULE_LICENSE("GPL");
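
/*
 * Illustrative usage sketch (not part of virtio_ring.c): how a driver built
 * against this 3.7-era API might submit a request and collect the completion.
 * The virtqueue pointer, the demo_req buffer and the function names below are
 * assumptions made for the example; only virtqueue_add_buf(),
 * virtqueue_kick() and virtqueue_get_buf() above are the real API.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical request buffer owned by the driver. */
struct demo_req {
	char payload[64];
};

/* Submit one device-readable buffer and notify the host. */
static int demo_submit(struct virtqueue *vq, struct demo_req *req)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, req->payload, sizeof(req->payload));

	/* 1 readable sg entry, 0 writable; req is the token we get back. */
	err = virtqueue_add_buf(vq, &sg, 1, 0, req, GFP_KERNEL);
	if (err < 0)
		return err;	/* e.g. -ENOSPC when the ring is full */

	/* Publish the new avail entry and notify the host if it wants it. */
	virtqueue_kick(vq);
	return 0;
}

/* Completion callback, typically wired up when the virtqueue is created. */
static void demo_done(struct virtqueue *vq)
{
	struct demo_req *req;
	unsigned int len;

	/* Drain every buffer the host has marked as used. */
	while ((req = virtqueue_get_buf(vq, &len)) != NULL)
		pr_info("request %p completed, %u bytes used\n", req, len);
}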