Linux Kernel 3.7.1
vhost.h
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

/* This is for zerocopy: the used buffer len is set to 1 when the lower
 * device has completed DMA. */
#define VHOST_DMA_DONE_LEN      1
#define VHOST_DMA_CLEAR_LEN     0

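/*
 * Illustrative sketch, not part of vhost.h: the completion path records
 * zerocopy DMA state in the used-element length field.  heads[] and both
 * helper names here are hypothetical; the real bookkeeping lives in
 * vhost_zerocopy_callback() and vhost_zerocopy_signal_used() in vhost.c.
 */
static inline void example_mark_dma_done(struct vring_used_elem *heads, int idx)
{
        heads[idx].len = VHOST_DMA_DONE_LEN;    /* lower device finished DMA */
}

static inline void example_recycle_slot(struct vring_used_elem *heads, int idx)
{
        heads[idx].len = VHOST_DMA_CLEAR_LEN;   /* slot may be reused */
}
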
struct vhost_device;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
        struct list_head node;
        vhost_work_fn_t fn;
        wait_queue_head_t done;
        int flushing;
        unsigned queue_seq;
        unsigned done_seq;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
        poll_table table;
        wait_queue_head_t *wqh;
        wait_queue_t wait;
        struct vhost_work work;
        unsigned long mask;
        struct vhost_dev *dev;
};

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);

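/*
 * Illustrative sketch, not part of vhost.h: queueing deferred work on the
 * device's worker thread.  example_fn and example_queue_work are hypothetical;
 * the field setup mirrors what vhost_work_init() does in vhost.c.
 */
static inline void example_fn(struct vhost_work *work)
{
        /* Runs in the vhost worker thread; queue_seq and done_seq let
         * vhost_work_flush() detect when this invocation has finished. */
}

static inline void example_queue_work(struct vhost_dev *dev,
                                      struct vhost_work *work)
{
        INIT_LIST_HEAD(&work->node);
        work->fn = example_fn;
        init_waitqueue_head(&work->done);
        work->flushing = 0;
        work->queue_seq = work->done_seq = 0;
        vhost_work_queue(dev, work);    /* wakes the worker; fn runs once */
}
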
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

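/*
 * Illustrative sketch, not part of vhost.h: the usual vhost_poll lifecycle.
 * All example_* names are hypothetical; the init/start pattern matches how
 * vhost-net watches its sockets and eventfds.
 */
static inline void example_handle_pollin(struct vhost_work *work)
{
        /* Called from the worker whenever the watched file reports POLLIN. */
}

static inline void example_watch_file(struct vhost_poll *poll,
                                      struct vhost_dev *dev, struct file *file)
{
        vhost_poll_init(poll, example_handle_pollin, POLLIN, dev);
        vhost_poll_start(poll, file);
        /* ... on teardown: vhost_poll_stop(poll); vhost_poll_flush(poll); */
}
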
struct vhost_log {
        u64 addr;
        u64 len;
};

struct vhost_virtqueue;

struct vhost_ubuf_ref {
        struct kref kref;
        wait_queue_head_t wait;
        struct vhost_virtqueue *vq;
};

struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
void vhost_ubuf_put(struct vhost_ubuf_ref *);
void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);

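/*
 * Illustrative sketch, not part of vhost.h: one reference is held per
 * outstanding zerocopy buffer.  Both helpers are hypothetical; kref.h is
 * assumed to be pulled in transitively, as the kref member above requires.
 */
static inline void example_take_ubuf(struct vhost_ubuf_ref *ubufs)
{
        kref_get(&ubufs->kref);         /* pin across the async transmit */
}

static inline void example_done_ubuf(struct vhost_ubuf_ref *ubufs)
{
        vhost_ubuf_put(ubufs);          /* final put wakes sleepers on ubufs->wait */
}
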
/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
        struct vhost_dev *dev;

        /* The actual ring of buffers. */
        struct mutex mutex;
        unsigned int num;
        struct vring_desc __user *desc;
        struct vring_avail __user *avail;
        struct vring_used __user *used;
        struct file *kick;
        struct file *call;
        struct file *error;
        struct eventfd_ctx *call_ctx;
        struct eventfd_ctx *error_ctx;
        struct eventfd_ctx *log_ctx;

        struct vhost_poll poll;

        /* The routine to call when the Guest pings us, or timeout. */
        vhost_work_fn_t handle_kick;

        /* Last available index we saw. */
        u16 last_avail_idx;

        /* Caches available index value from user. */
        u16 avail_idx;

        /* Last index we used. */
        u16 last_used_idx;

        /* Used flags */
        u16 used_flags;

        /* Last used index value we have signalled on */
        u16 signalled_used;

        /* Whether signalled_used is valid */
        bool signalled_used_valid;

        /* Log writes to used structure. */
        bool log_used;
        u64 log_addr;

        struct iovec iov[UIO_MAXIOV];
        /* hdr is used to store the virtio header.
         * Since each iovec has >= 1 byte length, we never need more than
         * header length entries to store the header. */
        struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
        struct iovec *indirect;
        size_t vhost_hlen;
        size_t sock_hlen;
        struct vring_used_elem *heads;
        /* We use a kind of RCU to access private pointer.
         * All readers access it from worker, which makes it possible to
         * flush the vhost_work instead of synchronize_rcu. Therefore readers do
         * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
         * vhost_work execution acts instead of rcu_read_lock() and the end of
         * vhost_work execution acts instead of rcu_read_unlock().
         * Writers use virtqueue mutex. */
        void __rcu *private_data;
        /* Log write descriptors */
        void __user *log_base;
        struct vhost_log *log;
        /* vhost zerocopy support fields below: */
        /* last used idx for outstanding DMA zerocopy buffers */
        int upend_idx;
        /* first used idx for DMA done zerocopy buffers */
        int done_idx;
        /* an array of userspace buffers info */
        struct ubuf_info *ubuf_info;
        /* Reference counting for outstanding ubufs.
         * Protected by vq mutex. Writers must also take device mutex. */
        struct vhost_ubuf_ref *ubufs;
};

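/*
 * Illustrative sketch, not part of vhost.h: upend_idx and done_idx delimit a
 * circular window of in-flight zerocopy buffers within the UIO_MAXIOV-sized
 * ubuf_info array.  example_zcopy_in_flight is a hypothetical helper;
 * vhost-net does this arithmetic when deciding whether to fall back to copying.
 */
static inline int example_zcopy_in_flight(struct vhost_virtqueue *vq)
{
        /* Entries in [done_idx, upend_idx) are still owned by the device. */
        return (vq->upend_idx - vq->done_idx + UIO_MAXIOV) % UIO_MAXIOV;
}
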
struct vhost_dev {
        /* Readers use RCU to access memory table pointer
         * log base pointer and features.
         * Writers use mutex below. */
        struct vhost_memory __rcu *memory;
        struct mm_struct *mm;
        struct mutex mutex;
        unsigned acked_features;
        struct vhost_virtqueue *vqs;
        int nvqs;
        struct file *log_file;
        struct eventfd_ctx *log_ctx;
        spinlock_t work_lock;
        struct list_head work_list;
        struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
long vhost_dev_reset_owner(struct vhost_dev *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

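/*
 * Illustrative sketch, not part of vhost.h: how a backend embeds and
 * initializes its device.  example_net, example_open and EXAMPLE_VQ_MAX are
 * hypothetical; the sequence mirrors the open path of drivers/vhost/net.c.
 */
#define EXAMPLE_VQ_MAX 2

struct example_net {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[EXAMPLE_VQ_MAX];
};

static inline long example_open(struct example_net *n)
{
        /* Wires dev->vqs/dev->nvqs and initializes per-vq state. */
        return vhost_dev_init(&n->dev, n->vqs, EXAMPLE_VQ_MAX);
}
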
int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
                      struct iovec iov[], unsigned int iov_count,
                      unsigned int *out_num, unsigned int *in_num,
                      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
                     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
                               unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
                                 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

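/*
 * Illustrative sketch, not part of vhost.h: the canonical consume loop a
 * backend runs from its handle_kick work.  example_drain_vq and the empty
 * "process" step are hypothetical; the get/use/signal/re-enable-notify
 * pattern follows drivers/vhost/net.c.
 */
static inline void example_drain_vq(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq)
{
        unsigned int out, in;
        int head;

        for (;;) {
                head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
                                         &out, &in, NULL, NULL);
                if (head < 0)
                        break;          /* ring access error */
                if (head == vq->num) {
                        /* Ring empty: re-arm guest notifications, then
                         * recheck to close the race with a concurrent kick. */
                        if (unlikely(vhost_enable_notify(dev, vq))) {
                                vhost_disable_notify(dev, vq);
                                continue;
                        }
                        break;
                }
                /* ... consume the out/in iovecs for descriptor 'head' ... */
                vhost_add_used_and_signal(dev, vq, head, 0);
        }
}
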
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                    unsigned int log_num, u64 len);
void vhost_zerocopy_callback(struct ubuf_info *);
void vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);

#define vq_err(vq, fmt, ...) do {                                  \
                pr_debug(pr_fmt(fmt), ##__VA_ARGS__);              \
                if ((vq)->error_ctx)                               \
                        eventfd_signal((vq)->error_ctx, 1);        \
        } while (0)

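/*
 * Illustrative sketch, not part of vhost.h: vq_err() reports a malformed ring.
 * It logs via pr_debug() and, if userspace registered an error eventfd,
 * signals vq->error_ctx.  example_check_tx is a hypothetical check.
 */
static inline void example_check_tx(struct vhost_virtqueue *vq,
                                    unsigned int out, unsigned int in)
{
        if (in)
                vq_err(vq, "Unexpected descriptor format: out %d, in %d\n",
                       out, in);
}
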
enum {
        VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
                         (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                         (1ULL << VIRTIO_RING_F_EVENT_IDX) |
                         (1ULL << VHOST_F_LOG_ALL),
        VHOST_NET_FEATURES = VHOST_FEATURES |
                         (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
                         (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
        unsigned acked_features;

        /* TODO: check that we are running from vhost_worker or dev mutex is
         * held? */
        acked_features = rcu_dereference_index_check(dev->acked_features, 1);
        return acked_features & (1 << bit);
}

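/*
 * Illustrative sketch, not part of vhost.h: backends branch on negotiated
 * features with vhost_has_feature().  example_hdr_len is hypothetical, and
 * linux/virtio_net.h is assumed visible (the hdr[] field above needs it too);
 * vhost-net sizes the virtio-net header this way.
 */
static inline size_t example_hdr_len(struct vhost_dev *dev)
{
        return vhost_has_feature(dev, VIRTIO_NET_F_MRG_RXBUF) ?
               sizeof(struct virtio_net_hdr_mrg_rxbuf) :
               sizeof(struct virtio_net_hdr);
}
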
void vhost_enable_zcopy(int vq);

#endif