Linux Kernel 3.7.1
netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  * drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 
41 #include <net/tcp.h>
42 
43 #include <xen/xen.h>
44 #include <xen/events.h>
45 #include <xen/interface/memory.h>
46 
47 #include <asm/xen/hypercall.h>
48 #include <asm/xen/page.h>
49 
50 struct pending_tx_info {
51  struct xen_netif_tx_request req;
52  struct xenvif *vif;
53 };
54 typedef unsigned int pending_ring_idx_t;
55 
56 struct netbk_rx_meta {
57  int id;
58  int size;
59  int gso_size;
60 };
61 
62 #define MAX_PENDING_REQS 256
63 
64 /* Discriminate from any valid pending_idx value. */
65 #define INVALID_PENDING_IDX 0xFFFF
66 
67 #define MAX_BUFFER_OFFSET PAGE_SIZE
68 
69 /* extra field used in struct page */
70 union page_ext {
71  struct {
72 #if BITS_PER_LONG < 64
73 #define IDX_WIDTH 8
74 #define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
75  unsigned int group:GROUP_WIDTH;
76  unsigned int idx:IDX_WIDTH;
77 #else
78  unsigned int group, idx;
79 #endif
80  } e;
81  void *mapping;
82 };
83 
84 struct xen_netbk {
85  wait_queue_head_t wq;
86  struct task_struct *task;
87 
88  struct sk_buff_head rx_queue;
89  struct sk_buff_head tx_queue;
90 
91  struct timer_list net_timer;
92 
93  struct page *mmap_pages[MAX_PENDING_REQS];
94 
95  pending_ring_idx_t pending_prod;
96  pending_ring_idx_t pending_cons;
97  struct list_head net_schedule_list;
98 
99  /* Protect the net_schedule_list in netif. */
100  spinlock_t net_schedule_list_lock;
101 
102  atomic_t netfront_count;
103 
104  struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
105  struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
106 
107  u16 pending_ring[MAX_PENDING_REQS];
108 
109  /*
110  * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
111  * head/fragment page uses 2 copy operations because it
112  * straddles two buffers in the frontend.
113  */
114  struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
115  struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
116 };
117 
118 static struct xen_netbk *xen_netbk;
119 static int xen_netbk_group_nr;
120 
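/*
 * Attach a new vif to the netback group that currently serves the fewest
 * frontends, so that work is spread evenly across the per-group threads.
 */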
121 void xen_netbk_add_xenvif(struct xenvif *vif)
122 {
123  int i;
124  int min_netfront_count;
125  int min_group = 0;
126  struct xen_netbk *netbk;
127 
128  min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
129  for (i = 0; i < xen_netbk_group_nr; i++) {
130  int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
131  if (netfront_count < min_netfront_count) {
132  min_group = i;
133  min_netfront_count = netfront_count;
134  }
135  }
136 
137  netbk = &xen_netbk[min_group];
138 
139  vif->netbk = netbk;
140  atomic_inc(&netbk->netfront_count);
141 }
142 
143 void xen_netbk_remove_xenvif(struct xenvif *vif)
144 {
145  struct xen_netbk *netbk = vif->netbk;
146  vif->netbk = NULL;
147  atomic_dec(&netbk->netfront_count);
148 }
149 
150 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
151 static void make_tx_response(struct xenvif *vif,
152  struct xen_netif_tx_request *txp,
153  s8 st);
154 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
155  u16 id,
156  s8 st,
157  u16 offset,
158  u16 size,
159  u16 flags);
160 
161 static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
162  u16 idx)
163 {
164  return page_to_pfn(netbk->mmap_pages[idx]);
165 }
166 
167 static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
168  u16 idx)
169 {
170  return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
171 }
172 
173 /* extra field used in struct page */
174 static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
175  unsigned int idx)
176 {
177  unsigned int group = netbk - xen_netbk;
178  union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
179 
180  BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
181  pg->mapping = ext.mapping;
182 }
183 
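/*
 * Decode the group/index stored by set_page_ext. Returns 1 and fills
 * *pgroup/*pidx only if the page really is one of our pending TX pages;
 * otherwise returns 0.
 */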
184 static int get_page_ext(struct page *pg,
185  unsigned int *pgroup, unsigned int *pidx)
186 {
187  union page_ext ext = { .mapping = pg->mapping };
188  struct xen_netbk *netbk;
189  unsigned int group, idx;
190 
191  group = ext.e.group - 1;
192 
193  if (group < 0 || group >= xen_netbk_group_nr)
194  return 0;
195 
196  netbk = &xen_netbk[group];
197 
198  idx = ext.e.idx;
199 
200  if ((idx < 0) || (idx >= MAX_PENDING_REQS))
201  return 0;
202 
203  if (netbk->mmap_pages[idx] != pg)
204  return 0;
205 
206  *pgroup = group;
207  *pidx = idx;
208 
209  return 1;
210 }
211 
212 /*
213  * This is the amount of packet we copy rather than map, so that the
214  * guest can't fiddle with the contents of the headers while we do
215  * packet processing on them (netfilter, routing, etc).
216  */
217 #define PKT_PROT_LEN (ETH_HLEN + \
218  VLAN_HLEN + \
219  sizeof(struct iphdr) + MAX_IPOPTLEN + \
220  sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
221 
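/*
 * While a TX request is pending, the frag's page_offset field is reused
 * to hold the pending ring index rather than a real page offset.
 */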
222 static u16 frag_get_pending_idx(skb_frag_t *frag)
223 {
224  return (u16)frag->page_offset;
225 }
226 
227 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
228 {
229  frag->page_offset = pending_idx;
230 }
231 
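/* MAX_PENDING_REQS is a power of two, so wrap ring indices with a mask. */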
232 static inline pending_ring_idx_t pending_index(unsigned i)
233 {
234  return i & (MAX_PENDING_REQS-1);
235 }
236 
237 static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
238 {
239  return MAX_PENDING_REQS -
240  netbk->pending_prod + netbk->pending_cons;
241 }
242 
243 static void xen_netbk_kick_thread(struct xen_netbk *netbk)
244 {
245  wake_up(&netbk->wq);
246 }
247 
248 static int max_required_rx_slots(struct xenvif *vif)
249 {
250  int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
251 
252  if (vif->can_sg || vif->gso || vif->gso_prefix)
253  max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
254 
255  return max;
256 }
257 
258 int xen_netbk_rx_ring_full(struct xenvif *vif)
259 {
260  RING_IDX peek = vif->rx_req_cons_peek;
261  RING_IDX needed = max_required_rx_slots(vif);
262 
263  return ((vif->rx.sring->req_prod - peek) < needed) ||
264  ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
265 }
266 
267 int xen_netbk_must_stop_queue(struct xenvif *vif)
268 {
269  if (!xen_netbk_rx_ring_full(vif))
270  return 0;
271 
272  vif->rx.sring->req_event = vif->rx_req_cons_peek +
273  max_required_rx_slots(vif);
274  mb(); /* request notification /then/ check the queue */
275 
276  return xen_netbk_rx_ring_full(vif);
277 }
278 
279 /*
280  * Returns true if we should start a new receive buffer instead of
281  * adding 'size' bytes to a buffer which currently contains 'offset'
282  * bytes.
283  */
284 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
285 {
286  /* simple case: we have completely filled the current buffer. */
287  if (offset == MAX_BUFFER_OFFSET)
288  return true;
289 
290  /*
291  * complex case: start a fresh buffer if the current frag
292  * would overflow the current buffer but only if:
293  * (i) this frag would fit completely in the next buffer
294  * and (ii) there is already some data in the current buffer
295  * and (iii) this is not the head buffer.
296  *
297  * Where:
298  * - (i) stops us splitting a frag into two copies
299  * unless the frag is too large for a single buffer.
300  * - (ii) stops us from leaving a buffer pointlessly empty.
301  * - (iii) stops us leaving the first buffer
302  * empty. Strictly speaking this is already covered
303  * by (ii) but is explicitly checked because
304  * netfront relies on the first buffer being
305  * non-empty and can crash otherwise.
306  *
307  * This means we will effectively linearise small
308  * frags, but we do not needlessly split large buffers
309  * into multiple copies; large frags tend to get their
310  * own buffers, as before.
311  */
312  if ((offset + size > MAX_BUFFER_OFFSET) &&
313  (size <= MAX_BUFFER_OFFSET) && offset && !head)
314  return true;
315 
316  return false;
317 }
318 
319 /*
320  * Figure out how many ring slots we're going to need to send @skb to
321  * the guest. This function is essentially a dry run of
322  * netbk_gop_frag_copy.
323  */
324 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
325 {
326  unsigned int count;
327  int i, copy_off;
328 
329  count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
330 
331  copy_off = skb_headlen(skb) % PAGE_SIZE;
332 
333  if (skb_shinfo(skb)->gso_size)
334  count++;
335 
336  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
337  unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
338  unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
339  unsigned long bytes;
340 
341  offset &= ~PAGE_MASK;
342 
343  while (size > 0) {
344  BUG_ON(offset >= PAGE_SIZE);
345  BUG_ON(copy_off > MAX_BUFFER_OFFSET);
346 
347  bytes = PAGE_SIZE - offset;
348 
349  if (bytes > size)
350  bytes = size;
351 
352  if (start_new_rx_buffer(copy_off, bytes, 0)) {
353  count++;
354  copy_off = 0;
355  }
356 
357  if (copy_off + bytes > MAX_BUFFER_OFFSET)
358  bytes = MAX_BUFFER_OFFSET - copy_off;
359 
360  copy_off += bytes;
361 
362  offset += bytes;
363  size -= bytes;
364 
365  if (offset == PAGE_SIZE)
366  offset = 0;
367  }
368  }
369  return count;
370 }
371 
372 struct netrx_pending_operations {
373  unsigned copy_prod, copy_cons;
374  unsigned meta_prod, meta_cons;
375  struct gnttab_copy *copy;
376  struct netbk_rx_meta *meta;
377  int copy_off;
378  grant_ref_t copy_gref;
379 };
380 
381 static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
382  struct netrx_pending_operations *npo)
383 {
384  struct netbk_rx_meta *meta;
385  struct xen_netif_rx_request *req;
386 
387  req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
388 
389  meta = npo->meta + npo->meta_prod++;
390  meta->gso_size = 0;
391  meta->size = 0;
392  meta->id = req->id;
393 
394  npo->copy_off = 0;
395  npo->copy_gref = req->gref;
396 
397  return meta;
398 }
399 
400 /*
401  * Set up the grant operations for this fragment. If it's a flipping
402  * interface, we also set up the unmap request from here.
403  */
404 static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
405  struct netrx_pending_operations *npo,
406  struct page *page, unsigned long size,
407  unsigned long offset, int *head)
408 {
409  struct gnttab_copy *copy_gop;
410  struct netbk_rx_meta *meta;
411  /*
412  * These variables are used iff get_page_ext returns true,
413  * in which case they are guaranteed to be initialized.
414  */
415  unsigned int uninitialized_var(group), uninitialized_var(idx);
416  int foreign = get_page_ext(page, &group, &idx);
417  unsigned long bytes;
418 
419  /* Data must not cross a page boundary. */
420  BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
421 
422  meta = npo->meta + npo->meta_prod - 1;
423 
424  /* Skip unused frames from start of page */
425  page += offset >> PAGE_SHIFT;
426  offset &= ~PAGE_MASK;
427 
428  while (size > 0) {
429  BUG_ON(offset >= PAGE_SIZE);
430  BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
431 
432  bytes = PAGE_SIZE - offset;
433 
434  if (bytes > size)
435  bytes = size;
436 
437  if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
438  /*
439  * Netfront requires there to be some data in the head
440  * buffer.
441  */
442  BUG_ON(*head);
443 
444  meta = get_next_rx_buffer(vif, npo);
445  }
446 
447  if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
448  bytes = MAX_BUFFER_OFFSET - npo->copy_off;
449 
450  copy_gop = npo->copy + npo->copy_prod++;
451  copy_gop->flags = GNTCOPY_dest_gref;
452  if (foreign) {
453  struct xen_netbk *netbk = &xen_netbk[group];
454  struct pending_tx_info *src_pend;
455 
456  src_pend = &netbk->pending_tx_info[idx];
457 
458  copy_gop->source.domid = src_pend->vif->domid;
459  copy_gop->source.u.ref = src_pend->req.gref;
460  copy_gop->flags |= GNTCOPY_source_gref;
461  } else {
462  void *vaddr = page_address(page);
463  copy_gop->source.domid = DOMID_SELF;
464  copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
465  }
466  copy_gop->source.offset = offset;
467  copy_gop->dest.domid = vif->domid;
468 
469  copy_gop->dest.offset = npo->copy_off;
470  copy_gop->dest.u.ref = npo->copy_gref;
471  copy_gop->len = bytes;
472 
473  npo->copy_off += bytes;
474  meta->size += bytes;
475 
476  offset += bytes;
477  size -= bytes;
478 
479  /* Next frame */
480  if (offset == PAGE_SIZE && size) {
481  BUG_ON(!PageCompound(page));
482  page++;
483  offset = 0;
484  }
485 
486  /* Leave a gap for the GSO descriptor. */
487  if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
488  vif->rx.req_cons++;
489 
490  *head = 0; /* There must be something in this buffer now. */
491 
492  }
493 }
494 
495 /*
496  * Prepare an SKB to be transmitted to the frontend.
497  *
498  * This function is responsible for allocating grant operations, meta
499  * structures, etc.
500  *
501  * It returns the number of meta structures consumed. The number of
502  * ring slots used is always equal to the number of meta slots used
503  * plus the number of GSO descriptors used. Currently, we use either
504  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
505  * frontend-side LRO).
506  */
507 static int netbk_gop_skb(struct sk_buff *skb,
508  struct netrx_pending_operations *npo)
509 {
510  struct xenvif *vif = netdev_priv(skb->dev);
511  int nr_frags = skb_shinfo(skb)->nr_frags;
512  int i;
513  struct xen_netif_rx_request *req;
514  struct netbk_rx_meta *meta;
515  unsigned char *data;
516  int head = 1;
517  int old_meta_prod;
518 
519  old_meta_prod = npo->meta_prod;
520 
521  /* Set up a GSO prefix descriptor, if necessary */
522  if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
523  req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
524  meta = npo->meta + npo->meta_prod++;
525  meta->gso_size = skb_shinfo(skb)->gso_size;
526  meta->size = 0;
527  meta->id = req->id;
528  }
529 
530  req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
531  meta = npo->meta + npo->meta_prod++;
532 
533  if (!vif->gso_prefix)
534  meta->gso_size = skb_shinfo(skb)->gso_size;
535  else
536  meta->gso_size = 0;
537 
538  meta->size = 0;
539  meta->id = req->id;
540  npo->copy_off = 0;
541  npo->copy_gref = req->gref;
542 
543  data = skb->data;
544  while (data < skb_tail_pointer(skb)) {
545  unsigned int offset = offset_in_page(data);
546  unsigned int len = PAGE_SIZE - offset;
547 
548  if (data + len > skb_tail_pointer(skb))
549  len = skb_tail_pointer(skb) - data;
550 
551  netbk_gop_frag_copy(vif, skb, npo,
552  virt_to_page(data), len, offset, &head);
553  data += len;
554  }
555 
556  for (i = 0; i < nr_frags; i++) {
557  netbk_gop_frag_copy(vif, skb, npo,
558  skb_frag_page(&skb_shinfo(skb)->frags[i]),
559  skb_frag_size(&skb_shinfo(skb)->frags[i]),
560  skb_shinfo(skb)->frags[i].page_offset,
561  &head);
562  }
563 
564  return npo->meta_prod - old_meta_prod;
565 }
566 
567 /*
568  * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
569  * used to set up the operations on the top of
570  * netrx_pending_operations, which have since been done. Check that
571  * they didn't give any errors and advance over them.
572  */
573 static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
574  struct netrx_pending_operations *npo)
575 {
576  struct gnttab_copy *copy_op;
577  int status = XEN_NETIF_RSP_OKAY;
578  int i;
579 
580  for (i = 0; i < nr_meta_slots; i++) {
581  copy_op = npo->copy + npo->copy_cons++;
582  if (copy_op->status != GNTST_okay) {
583  netdev_dbg(vif->dev,
584  "Bad status %d from copy to DOM%d.\n",
585  copy_op->status, vif->domid);
586  status = XEN_NETIF_RSP_ERROR;
587  }
588  }
589 
590  return status;
591 }
592 
593 static void netbk_add_frag_responses(struct xenvif *vif, int status,
594  struct netbk_rx_meta *meta,
595  int nr_meta_slots)
596 {
597  int i;
598  unsigned long offset;
599 
600  /* No fragments used */
601  if (nr_meta_slots <= 1)
602  return;
603 
604  nr_meta_slots--;
605 
606  for (i = 0; i < nr_meta_slots; i++) {
607  int flags;
608  if (i == nr_meta_slots - 1)
609  flags = 0;
610  else
611  flags = XEN_NETRXF_more_data;
612 
613  offset = 0;
614  make_rx_response(vif, meta[i].id, status, offset,
615  meta[i].size, flags);
616  }
617 }
618 
619 struct skb_cb_overlay {
620  int meta_slots_used;
621 };
622 
623 static void xen_netbk_rx_action(struct xen_netbk *netbk)
624 {
625  struct xenvif *vif = NULL, *tmp;
626  s8 status;
627  u16 irq, flags;
628  struct xen_netif_rx_response *resp;
629  struct sk_buff_head rxq;
630  struct sk_buff *skb;
631  LIST_HEAD(notify);
632  int ret;
633  int nr_frags;
634  int count;
635  unsigned long offset;
636  struct skb_cb_overlay *sco;
637 
638  struct netrx_pending_operations npo = {
639  .copy = netbk->grant_copy_op,
640  .meta = netbk->meta,
641  };
642 
643  skb_queue_head_init(&rxq);
644 
645  count = 0;
646 
647  while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
648  vif = netdev_priv(skb->dev);
649  nr_frags = skb_shinfo(skb)->nr_frags;
650 
651  sco = (struct skb_cb_overlay *)skb->cb;
652  sco->meta_slots_used = netbk_gop_skb(skb, &npo);
653 
654  count += nr_frags + 1;
655 
656  __skb_queue_tail(&rxq, skb);
657 
658  /* Filled the batch queue? */
659  if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
660  break;
661  }
662 
663  BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
664 
665  if (!npo.copy_prod)
666  return;
667 
668  BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
669  gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
670 
671  while ((skb = __skb_dequeue(&rxq)) != NULL) {
672  sco = (struct skb_cb_overlay *)skb->cb;
673 
674  vif = netdev_priv(skb->dev);
675 
676  if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
677  resp = RING_GET_RESPONSE(&vif->rx,
678  vif->rx.rsp_prod_pvt++);
679 
680  resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
681 
682  resp->offset = netbk->meta[npo.meta_cons].gso_size;
683  resp->id = netbk->meta[npo.meta_cons].id;
684  resp->status = sco->meta_slots_used;
685 
686  npo.meta_cons++;
687  sco->meta_slots_used--;
688  }
689 
690 
691  vif->dev->stats.tx_bytes += skb->len;
692  vif->dev->stats.tx_packets++;
693 
694  status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
695 
696  if (sco->meta_slots_used == 1)
697  flags = 0;
698  else
699  flags = XEN_NETRXF_more_data;
700 
701  if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
702  flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
703  else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
704  /* remote but checksummed. */
705  flags |= XEN_NETRXF_data_validated;
706 
707  offset = 0;
708  resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
709  status, offset,
710  netbk->meta[npo.meta_cons].size,
711  flags);
712 
713  if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
714  struct xen_netif_extra_info *gso =
715  (struct xen_netif_extra_info *)
716  RING_GET_RESPONSE(&vif->rx,
717  vif->rx.rsp_prod_pvt++);
718 
719  resp->flags |= XEN_NETRXF_extra_info;
720 
721  gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
722  gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
723  gso->u.gso.pad = 0;
724  gso->u.gso.features = 0;
725 
726  gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
727  gso->flags = 0;
728  }
729 
730  netbk_add_frag_responses(vif, status,
731  netbk->meta + npo.meta_cons + 1,
732  sco->meta_slots_used);
733 
734  RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
735  irq = vif->irq;
736  if (ret && list_empty(&vif->notify_list))
737  list_add_tail(&vif->notify_list, &notify);
738 
739  xenvif_notify_tx_completion(vif);
740 
741  xenvif_put(vif);
742  npo.meta_cons += sco->meta_slots_used;
743  dev_kfree_skb(skb);
744  }
745 
746  list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
747  notify_remote_via_irq(vif->irq);
748  list_del_init(&vif->notify_list);
749  }
750 
751  /* More work to do? */
752  if (!skb_queue_empty(&netbk->rx_queue) &&
753  !timer_pending(&netbk->net_timer))
754  xen_netbk_kick_thread(netbk);
755 }
756 
757 void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
758 {
759  struct xen_netbk *netbk = vif->netbk;
760 
761  skb_queue_tail(&netbk->rx_queue, skb);
762 
763  xen_netbk_kick_thread(netbk);
764 }
765 
766 static void xen_netbk_alarm(unsigned long data)
767 {
768  struct xen_netbk *netbk = (struct xen_netbk *)data;
769  xen_netbk_kick_thread(netbk);
770 }
771 
772 static int __on_net_schedule_list(struct xenvif *vif)
773 {
774  return !list_empty(&vif->schedule_list);
775 }
776 
777 /* Must be called with net_schedule_list_lock held */
778 static void remove_from_net_schedule_list(struct xenvif *vif)
779 {
780  if (likely(__on_net_schedule_list(vif))) {
781  list_del_init(&vif->schedule_list);
782  xenvif_put(vif);
783  }
784 }
785 
786 static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
787 {
788  struct xenvif *vif = NULL;
789 
790  spin_lock_irq(&netbk->net_schedule_list_lock);
791  if (list_empty(&netbk->net_schedule_list))
792  goto out;
793 
794  vif = list_first_entry(&netbk->net_schedule_list,
795  struct xenvif, schedule_list);
796  if (!vif)
797  goto out;
798 
799  xenvif_get(vif);
800 
801  remove_from_net_schedule_list(vif);
802 out:
803  spin_unlock_irq(&netbk->net_schedule_list_lock);
804  return vif;
805 }
806 
807 void xen_netbk_schedule_xenvif(struct xenvif *vif)
808 {
809  unsigned long flags;
810  struct xen_netbk *netbk = vif->netbk;
811 
812  if (__on_net_schedule_list(vif))
813  goto kick;
814 
815  spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
816  if (!__on_net_schedule_list(vif) &&
817  likely(xenvif_schedulable(vif))) {
818  list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
819  xenvif_get(vif);
820  }
821  spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
822 
823 kick:
824  smp_mb();
825  if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
826  !list_empty(&netbk->net_schedule_list))
827  xen_netbk_kick_thread(netbk);
828 }
829 
830 void xen_netbk_deschedule_xenvif(struct xenvif *vif)
831 {
832  struct xen_netbk *netbk = vif->netbk;
833  spin_lock_irq(&netbk->net_schedule_list_lock);
834  remove_from_net_schedule_list(vif);
835  spin_unlock_irq(&netbk->net_schedule_list_lock);
836 }
837 
838 void xen_netbk_check_rx_xenvif(struct xenvif *vif)
839 {
840  int more_to_do;
841 
842  RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
843 
844  if (more_to_do)
845  xen_netbk_schedule_xenvif(vif);
846 }
847 
848 static void tx_add_credit(struct xenvif *vif)
849 {
850  unsigned long max_burst, max_credit;
851 
852  /*
853  * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
854  * Otherwise the interface can seize up due to insufficient credit.
855  */
856  max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
857  max_burst = min(max_burst, 131072UL);
858  max_burst = max(max_burst, vif->credit_bytes);
859 
860  /* Take care that adding a new chunk of credit doesn't wrap to zero. */
861  max_credit = vif->remaining_credit + vif->credit_bytes;
862  if (max_credit < vif->remaining_credit)
863  max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
864 
865  vif->remaining_credit = min(max_credit, max_burst);
866 }
867 
868 static void tx_credit_callback(unsigned long data)
869 {
870  struct xenvif *vif = (struct xenvif *)data;
871  tx_add_credit(vif);
872  xen_netbk_check_rx_xenvif(vif);
873 }
874 
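/*
 * Abort a request chain: send an error response for every request up to
 * @end, then drop the reference on the vif.
 */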
875 static void netbk_tx_err(struct xenvif *vif,
876  struct xen_netif_tx_request *txp, RING_IDX end)
877 {
878  RING_IDX cons = vif->tx.req_cons;
879 
880  do {
881  make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
882  if (cons >= end)
883  break;
884  txp = RING_GET_REQUEST(&vif->tx, cons++);
885  } while (1);
886  vif->tx.req_cons = cons;
887  xen_netbk_check_rx_xenvif(vif);
888  xenvif_put(vif);
889 }
890 
891 static int netbk_count_requests(struct xenvif *vif,
892  struct xen_netif_tx_request *first,
893  struct xen_netif_tx_request *txp,
894  int work_to_do)
895 {
896  RING_IDX cons = vif->tx.req_cons;
897  int frags = 0;
898 
899  if (!(first->flags & XEN_NETTXF_more_data))
900  return 0;
901 
902  do {
903  if (frags >= work_to_do) {
904  netdev_dbg(vif->dev, "Need more frags\n");
905  return -frags;
906  }
907 
908  if (unlikely(frags >= MAX_SKB_FRAGS)) {
909  netdev_dbg(vif->dev, "Too many frags\n");
910  return -frags;
911  }
912 
913  memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
914  sizeof(*txp));
915  if (txp->size > first->size) {
916  netdev_dbg(vif->dev, "Frags galore\n");
917  return -frags;
918  }
919 
920  first->size -= txp->size;
921  frags++;
922 
923  if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
924  netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
925  txp->offset, txp->size);
926  return -frags;
927  }
928  } while ((txp++)->flags & XEN_NETTXF_more_data);
929  return frags;
930 }
931 
932 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
933  struct sk_buff *skb,
934  u16 pending_idx)
935 {
936  struct page *page;
937  page = alloc_page(GFP_KERNEL|__GFP_COLD);
938  if (!page)
939  return NULL;
940  set_page_ext(page, netbk, pending_idx);
941  netbk->mmap_pages[pending_idx] = page;
942  return page;
943 }
944 
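/*
 * Set up one grant-copy operation per remaining frag of the skb,
 * allocating a backing page and a pending slot for each. Returns the
 * next free copy op, or NULL if a page allocation failed.
 */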
945 static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
946  struct xenvif *vif,
947  struct sk_buff *skb,
948  struct xen_netif_tx_request *txp,
949  struct gnttab_copy *gop)
950 {
951  struct skb_shared_info *shinfo = skb_shinfo(skb);
952  skb_frag_t *frags = shinfo->frags;
953  u16 pending_idx = *((u16 *)skb->data);
954  int i, start;
955 
956  /* Skip first skb fragment if it is on same page as header fragment. */
957  start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
958 
959  for (i = start; i < shinfo->nr_frags; i++, txp++) {
960  struct page *page;
961  pending_ring_idx_t index;
962  struct pending_tx_info *pending_tx_info =
963  netbk->pending_tx_info;
964 
965  index = pending_index(netbk->pending_cons++);
966  pending_idx = netbk->pending_ring[index];
967  page = xen_netbk_alloc_page(netbk, skb, pending_idx);
968  if (!page)
969  return NULL;
970 
971  gop->source.u.ref = txp->gref;
972  gop->source.domid = vif->domid;
973  gop->source.offset = txp->offset;
974 
975  gop->dest.u.gmfn = virt_to_mfn(page_address(page));
976  gop->dest.domid = DOMID_SELF;
977  gop->dest.offset = txp->offset;
978 
979  gop->len = txp->size;
980  gop->flags = GNTCOPY_source_gref;
981 
982  gop++;
983 
984  memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
985  xenvif_get(vif);
986  pending_tx_info[pending_idx].vif = vif;
987  frag_set_pending_idx(&frags[i], pending_idx);
988  }
989 
990  return gop;
991 }
992 
993 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
994  struct sk_buff *skb,
995  struct gnttab_copy **gopp)
996 {
997  struct gnttab_copy *gop = *gopp;
998  u16 pending_idx = *((u16 *)skb->data);
999  struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
1000  struct xenvif *vif = pending_tx_info[pending_idx].vif;
1001  struct xen_netif_tx_request *txp;
1002  struct skb_shared_info *shinfo = skb_shinfo(skb);
1003  int nr_frags = shinfo->nr_frags;
1004  int i, err, start;
1005 
1006  /* Check status of header. */
1007  err = gop->status;
1008  if (unlikely(err)) {
1009  pending_ring_idx_t index;
1010  index = pending_index(netbk->pending_prod++);
1011  txp = &pending_tx_info[pending_idx].req;
1012  make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1013  netbk->pending_ring[index] = pending_idx;
1014  xenvif_put(vif);
1015  }
1016 
1017  /* Skip first skb fragment if it is on same page as header fragment. */
1018  start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1019 
1020  for (i = start; i < nr_frags; i++) {
1021  int j, newerr;
1022  pending_ring_idx_t index;
1023 
1024  pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1025 
1026  /* Check error status: if okay then remember grant handle. */
1027  newerr = (++gop)->status;
1028  if (likely(!newerr)) {
1029  /* Had a previous error? Invalidate this fragment. */
1030  if (unlikely(err))
1031  xen_netbk_idx_release(netbk, pending_idx);
1032  continue;
1033  }
1034 
1035  /* Error on this fragment: respond to client with an error. */
1036  txp = &netbk->pending_tx_info[pending_idx].req;
1037  make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
1038  index = pending_index(netbk->pending_prod++);
1039  netbk->pending_ring[index] = pending_idx;
1040  xenvif_put(vif);
1041 
1042  /* Not the first error? Preceding frags already invalidated. */
1043  if (err)
1044  continue;
1045 
1046  /* First error: invalidate header and preceding fragments. */
1047  pending_idx = *((u16 *)skb->data);
1048  xen_netbk_idx_release(netbk, pending_idx);
1049  for (j = start; j < i; j++) {
1050  pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1051  xen_netbk_idx_release(netbk, pending_idx);
1052  }
1053 
1054  /* Remember the error: invalidate all subsequent fragments. */
1055  err = newerr;
1056  }
1057 
1058  *gopp = gop + 1;
1059  return err;
1060 }
1061 
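/*
 * The grant copies have landed in local pages: point the skb frags at
 * those pages and account their sizes to the skb.
 */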
1062 static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
1063 {
1064  struct skb_shared_info *shinfo = skb_shinfo(skb);
1065  int nr_frags = shinfo->nr_frags;
1066  int i;
1067 
1068  for (i = 0; i < nr_frags; i++) {
1069  skb_frag_t *frag = shinfo->frags + i;
1070  struct xen_netif_tx_request *txp;
1071  struct page *page;
1072  u16 pending_idx;
1073 
1074  pending_idx = frag_get_pending_idx(frag);
1075 
1076  txp = &netbk->pending_tx_info[pending_idx].req;
1077  page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
1078  __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1079  skb->len += txp->size;
1080  skb->data_len += txp->size;
1081  skb->truesize += txp->size;
1082 
1083  /* Take an extra reference to offset xen_netbk_idx_release */
1084  get_page(netbk->mmap_pages[pending_idx]);
1085  xen_netbk_idx_release(netbk, pending_idx);
1086  }
1087 }
1088 
1089 static int xen_netbk_get_extras(struct xenvif *vif,
1090  struct xen_netif_extra_info *extras,
1091  int work_to_do)
1092 {
1093  struct xen_netif_extra_info extra;
1094  RING_IDX cons = vif->tx.req_cons;
1095 
1096  do {
1097  if (unlikely(work_to_do-- <= 0)) {
1098  netdev_dbg(vif->dev, "Missing extra info\n");
1099  return -EBADR;
1100  }
1101 
1102  memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1103  sizeof(extra));
1104  if (unlikely(!extra.type ||
1105  extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1106  vif->tx.req_cons = ++cons;
1107  netdev_dbg(vif->dev,
1108  "Invalid extra type: %d\n", extra.type);
1109  return -EINVAL;
1110  }
1111 
1112  memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1113  vif->tx.req_cons = ++cons;
1114  } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1115 
1116  return work_to_do;
1117 }
1118 
1119 static int netbk_set_skb_gso(struct xenvif *vif,
1120  struct sk_buff *skb,
1121  struct xen_netif_extra_info *gso)
1122 {
1123  if (!gso->u.gso.size) {
1124  netdev_dbg(vif->dev, "GSO size must not be zero.\n");
1125  return -EINVAL;
1126  }
1127 
1128  /* Currently only TCPv4 S.O. is supported. */
1129  if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1130  netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1131  return -EINVAL;
1132  }
1133 
1134  skb_shinfo(skb)->gso_size = gso->u.gso.size;
1135  skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1136 
1137  /* Header must be checked, and gso_segs computed. */
1138  skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1139  skb_shinfo(skb)->gso_segs = 0;
1140 
1141  return 0;
1142 }
1143 
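/*
 * Fill in csum_start/csum_offset for a CHECKSUM_PARTIAL skb and, for
 * buggy GSO senders that did not set csum_blank, recompute the partial
 * checksum. Only IPv4 TCP/UDP is handled.
 */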
1144 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1145 {
1146  struct iphdr *iph;
1147  unsigned char *th;
1148  int err = -EPROTO;
1149  int recalculate_partial_csum = 0;
1150 
1151  /*
1152  * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1153  * peers can fail to set NETRXF_csum_blank when sending a GSO
1154  * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1155  * recalculate the partial checksum.
1156  */
1157  if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1158  vif->rx_gso_checksum_fixup++;
1159  skb->ip_summed = CHECKSUM_PARTIAL;
1160  recalculate_partial_csum = 1;
1161  }
1162 
1163  /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1164  if (skb->ip_summed != CHECKSUM_PARTIAL)
1165  return 0;
1166 
1167  if (skb->protocol != htons(ETH_P_IP))
1168  goto out;
1169 
1170  iph = (void *)skb->data;
1171  th = skb->data + 4 * iph->ihl;
1172  if (th >= skb_tail_pointer(skb))
1173  goto out;
1174 
1175  skb->csum_start = th - skb->head;
1176  switch (iph->protocol) {
1177  case IPPROTO_TCP:
1178  skb->csum_offset = offsetof(struct tcphdr, check);
1179 
1180  if (recalculate_partial_csum) {
1181  struct tcphdr *tcph = (struct tcphdr *)th;
1182  tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1183  skb->len - iph->ihl*4,
1184  IPPROTO_TCP, 0);
1185  }
1186  break;
1187  case IPPROTO_UDP:
1188  skb->csum_offset = offsetof(struct udphdr, check);
1189 
1190  if (recalculate_partial_csum) {
1191  struct udphdr *udph = (struct udphdr *)th;
1192  udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1193  skb->len - iph->ihl*4,
1194  IPPROTO_UDP, 0);
1195  }
1196  break;
1197  default:
1198  if (net_ratelimit())
1199  netdev_err(vif->dev,
1200  "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
1201  iph->protocol);
1202  goto out;
1203  }
1204 
1205  if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
1206  goto out;
1207 
1208  err = 0;
1209 
1210 out:
1211  return err;
1212 }
1213 
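/*
 * Credit-based scheduling: replenish the vif's credit once per period
 * and, if the request still exceeds the remaining credit, arm the credit
 * timer and tell the caller to hold off.
 */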
1214 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1215 {
1216  unsigned long now = jiffies;
1217  unsigned long next_credit =
1218  vif->credit_timeout.expires +
1219  msecs_to_jiffies(vif->credit_usec / 1000);
1220 
1221  /* Timer could already be pending in rare cases. */
1222  if (timer_pending(&vif->credit_timeout))
1223  return true;
1224 
1225  /* Passed the point where we can replenish credit? */
1226  if (time_after_eq(now, next_credit)) {
1227  vif->credit_timeout.expires = now;
1228  tx_add_credit(vif);
1229  }
1230 
1231  /* Still too big to send right now? Set a callback. */
1232  if (size > vif->remaining_credit) {
1233  vif->credit_timeout.data =
1234  (unsigned long)vif;
1235  vif->credit_timeout.function =
1236  tx_credit_callback;
1237  mod_timer(&vif->credit_timeout,
1238  next_credit);
1239 
1240  return true;
1241  }
1242 
1243  return false;
1244 }
1245 
1246 static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
1247 {
1248  struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
1249  struct sk_buff *skb;
1250  int ret;
1251 
1252  while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1253  !list_empty(&netbk->net_schedule_list)) {
1254  struct xenvif *vif;
1255  struct xen_netif_tx_request txreq;
1256  struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
1257  struct page *page;
1258  struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1259  u16 pending_idx;
1260  RING_IDX idx;
1261  int work_to_do;
1262  unsigned int data_len;
1263  pending_ring_idx_t index;
1264 
1265  /* Get a netif from the list with work to do. */
1266  vif = poll_net_schedule_list(netbk);
1267  if (!vif)
1268  continue;
1269 
1270  RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1271  if (!work_to_do) {
1272  xenvif_put(vif);
1273  continue;
1274  }
1275 
1276  idx = vif->tx.req_cons;
1277  rmb(); /* Ensure that we see the request before we copy it. */
1278  memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1279 
1280  /* Credit-based scheduling. */
1281  if (txreq.size > vif->remaining_credit &&
1282  tx_credit_exceeded(vif, txreq.size)) {
1283  xenvif_put(vif);
1284  continue;
1285  }
1286 
1287  vif->remaining_credit -= txreq.size;
1288 
1289  work_to_do--;
1290  vif->tx.req_cons = ++idx;
1291 
1292  memset(extras, 0, sizeof(extras));
1293  if (txreq.flags & XEN_NETTXF_extra_info) {
1294  work_to_do = xen_netbk_get_extras(vif, extras,
1295  work_to_do);
1296  idx = vif->tx.req_cons;
1297  if (unlikely(work_to_do < 0)) {
1298  netbk_tx_err(vif, &txreq, idx);
1299  continue;
1300  }
1301  }
1302 
1303  ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
1304  if (unlikely(ret < 0)) {
1305  netbk_tx_err(vif, &txreq, idx - ret);
1306  continue;
1307  }
1308  idx += ret;
1309 
1310  if (unlikely(txreq.size < ETH_HLEN)) {
1311  netdev_dbg(vif->dev,
1312  "Bad packet size: %d\n", txreq.size);
1313  netbk_tx_err(vif, &txreq, idx);
1314  continue;
1315  }
1316 
1317  /* No crossing a page as the payload mustn't fragment. */
1318  if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1319  netdev_dbg(vif->dev,
1320  "txreq.offset: %x, size: %u, end: %lu\n",
1321  txreq.offset, txreq.size,
1322  (txreq.offset&~PAGE_MASK) + txreq.size);
1323  netbk_tx_err(vif, &txreq, idx);
1324  continue;
1325  }
1326 
1327  index = pending_index(netbk->pending_cons);
1328  pending_idx = netbk->pending_ring[index];
1329 
1330  data_len = (txreq.size > PKT_PROT_LEN &&
1331  ret < MAX_SKB_FRAGS) ?
1332  PKT_PROT_LEN : txreq.size;
1333 
1334  skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1335  GFP_ATOMIC | __GFP_NOWARN);
1336  if (unlikely(skb == NULL)) {
1337  netdev_dbg(vif->dev,
1338  "Can't allocate a skb in start_xmit.\n");
1339  netbk_tx_err(vif, &txreq, idx);
1340  break;
1341  }
1342 
1343  /* Packets passed to netif_rx() must have some headroom. */
1344  skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1345 
1346  if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1347  struct xen_netif_extra_info *gso;
1348  gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1349 
1350  if (netbk_set_skb_gso(vif, skb, gso)) {
1351  kfree_skb(skb);
1352  netbk_tx_err(vif, &txreq, idx);
1353  continue;
1354  }
1355  }
1356 
1357  /* XXX could copy straight to head */
1358  page = xen_netbk_alloc_page(netbk, skb, pending_idx);
1359  if (!page) {
1360  kfree_skb(skb);
1361  netbk_tx_err(vif, &txreq, idx);
1362  continue;
1363  }
1364 
1365  gop->source.u.ref = txreq.gref;
1366  gop->source.domid = vif->domid;
1367  gop->source.offset = txreq.offset;
1368 
1369  gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1370  gop->dest.domid = DOMID_SELF;
1371  gop->dest.offset = txreq.offset;
1372 
1373  gop->len = txreq.size;
1374  gop->flags = GNTCOPY_source_gref;
1375 
1376  gop++;
1377 
1378  memcpy(&netbk->pending_tx_info[pending_idx].req,
1379  &txreq, sizeof(txreq));
1380  netbk->pending_tx_info[pending_idx].vif = vif;
1381  *((u16 *)skb->data) = pending_idx;
1382 
1383  __skb_put(skb, data_len);
1384 
1385  skb_shinfo(skb)->nr_frags = ret;
1386  if (data_len < txreq.size) {
1387  skb_shinfo(skb)->nr_frags++;
1388  frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1389  pending_idx);
1390  } else {
1391  frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1392  INVALID_PENDING_IDX);
1393  }
1394 
1395  netbk->pending_cons++;
1396 
1397  request_gop = xen_netbk_get_requests(netbk, vif,
1398  skb, txfrags, gop);
1399  if (request_gop == NULL) {
1400  kfree_skb(skb);
1401  netbk_tx_err(vif, &txreq, idx);
1402  continue;
1403  }
1404  gop = request_gop;
1405 
1406  __skb_queue_tail(&netbk->tx_queue, skb);
1407 
1408  vif->tx.req_cons = idx;
1409  xen_netbk_check_rx_xenvif(vif);
1410 
1411  if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
1412  break;
1413  }
1414 
1415  return gop - netbk->tx_copy_ops;
1416 }
1417 
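/*
 * Second half of the TX path: the grant copies built by
 * xen_netbk_tx_build_gops have completed. Check their status, copy the
 * linear header into the skb, fix up checksums and pass each skb to the
 * vif.
 */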
1418 static void xen_netbk_tx_submit(struct xen_netbk *netbk)
1419 {
1420  struct gnttab_copy *gop = netbk->tx_copy_ops;
1421  struct sk_buff *skb;
1422 
1423  while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
1424  struct xen_netif_tx_request *txp;
1425  struct xenvif *vif;
1426  u16 pending_idx;
1427  unsigned data_len;
1428 
1429  pending_idx = *((u16 *)skb->data);
1430  vif = netbk->pending_tx_info[pending_idx].vif;
1431  txp = &netbk->pending_tx_info[pending_idx].req;
1432 
1433  /* Check the remap error code. */
1434  if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
1435  netdev_dbg(vif->dev, "netback grant failed.\n");
1436  skb_shinfo(skb)->nr_frags = 0;
1437  kfree_skb(skb);
1438  continue;
1439  }
1440 
1441  data_len = skb->len;
1442  memcpy(skb->data,
1443  (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
1444  data_len);
1445  if (data_len < txp->size) {
1446  /* Append the packet payload as a fragment. */
1447  txp->offset += data_len;
1448  txp->size -= data_len;
1449  } else {
1450  /* Schedule a response immediately. */
1451  xen_netbk_idx_release(netbk, pending_idx);
1452  }
1453 
1454  if (txp->flags & XEN_NETTXF_csum_blank)
1455  skb->ip_summed = CHECKSUM_PARTIAL;
1456  else if (txp->flags & XEN_NETTXF_data_validated)
1457  skb->ip_summed = CHECKSUM_UNNECESSARY;
1458 
1459  xen_netbk_fill_frags(netbk, skb);
1460 
1461  /*
1462  * If the initial fragment was < PKT_PROT_LEN then
1463  * pull through some bytes from the other fragments to
1464  * increase the linear region to PKT_PROT_LEN bytes.
1465  */
1466  if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
1467  int target = min_t(int, skb->len, PKT_PROT_LEN);
1468  __pskb_pull_tail(skb, target - skb_headlen(skb));
1469  }
1470 
1471  skb->dev = vif->dev;
1472  skb->protocol = eth_type_trans(skb, skb->dev);
1473 
1474  if (checksum_setup(vif, skb)) {
1475  netdev_dbg(vif->dev,
1476  "Can't setup checksum in net_tx_action\n");
1477  kfree_skb(skb);
1478  continue;
1479  }
1480 
1481  vif->dev->stats.rx_bytes += skb->len;
1482  vif->dev->stats.rx_packets++;
1483 
1484  xenvif_receive_skb(vif, skb);
1485  }
1486 }
1487 
1488 /* Called after netfront has transmitted */
1489 static void xen_netbk_tx_action(struct xen_netbk *netbk)
1490 {
1491  unsigned nr_gops;
1492 
1493  nr_gops = xen_netbk_tx_build_gops(netbk);
1494 
1495  if (nr_gops == 0)
1496  return;
1497 
1498  gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
1499 
1500  xen_netbk_tx_submit(netbk);
1501 }
1502 
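/*
 * A pending TX request is complete: send the response to the frontend,
 * return the slot to the pending ring and release the backing page.
 */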
1503 static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
1504 {
1505  struct xenvif *vif;
1506  struct pending_tx_info *pending_tx_info;
1507  pending_ring_idx_t index;
1508 
1509  /* Already complete? */
1510  if (netbk->mmap_pages[pending_idx] == NULL)
1511  return;
1512 
1513  pending_tx_info = &netbk->pending_tx_info[pending_idx];
1514 
1515  vif = pending_tx_info->vif;
1516 
1517  make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
1518 
1519  index = pending_index(netbk->pending_prod++);
1520  netbk->pending_ring[index] = pending_idx;
1521 
1522  xenvif_put(vif);
1523 
1524  netbk->mmap_pages[pending_idx]->mapping = 0;
1525  put_page(netbk->mmap_pages[pending_idx]);
1526  netbk->mmap_pages[pending_idx] = NULL;
1527 }
1528 
1529 static void make_tx_response(struct xenvif *vif,
1530  struct xen_netif_tx_request *txp,
1531  s8 st)
1532 {
1533  RING_IDX i = vif->tx.rsp_prod_pvt;
1534  struct xen_netif_tx_response *resp;
1535  int notify;
1536 
1537  resp = RING_GET_RESPONSE(&vif->tx, i);
1538  resp->id = txp->id;
1539  resp->status = st;
1540 
1541  if (txp->flags & XEN_NETTXF_extra_info)
1542  RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1543 
1544  vif->tx.rsp_prod_pvt = ++i;
1545  RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1546  if (notify)
1547  notify_remote_via_irq(vif->irq);
1548 }
1549 
1550 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1551  u16 id,
1552  s8 st,
1553  u16 offset,
1554  u16 size,
1555  u16 flags)
1556 {
1557  RING_IDX i = vif->rx.rsp_prod_pvt;
1558  struct xen_netif_rx_response *resp;
1559 
1560  resp = RING_GET_RESPONSE(&vif->rx, i);
1561  resp->offset = offset;
1562  resp->flags = flags;
1563  resp->id = id;
1564  resp->status = (s16)size;
1565  if (st < 0)
1566  resp->status = (s16)st;
1567 
1568  vif->rx.rsp_prod_pvt = ++i;
1569 
1570  return resp;
1571 }
1572 
1573 static inline int rx_work_todo(struct xen_netbk *netbk)
1574 {
1575  return !skb_queue_empty(&netbk->rx_queue);
1576 }
1577 
1578 static inline int tx_work_todo(struct xen_netbk *netbk)
1579 {
1580 
1581  if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
1582  !list_empty(&netbk->net_schedule_list))
1583  return 1;
1584 
1585  return 0;
1586 }
1587 
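/*
 * Main loop of each netback thread: sleep until there is RX or TX work
 * (or we are asked to stop), then run the corresponding action.
 */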
1588 static int xen_netbk_kthread(void *data)
1589 {
1590  struct xen_netbk *netbk = data;
1591  while (!kthread_should_stop()) {
1592  wait_event_interruptible(netbk->wq,
1593  rx_work_todo(netbk) ||
1594  tx_work_todo(netbk) ||
1595  kthread_should_stop());
1596  cond_resched();
1597 
1598  if (kthread_should_stop())
1599  break;
1600 
1601  if (rx_work_todo(netbk))
1602  xen_netbk_rx_action(netbk);
1603 
1604  if (tx_work_todo(netbk))
1605  xen_netbk_tx_action(netbk);
1606  }
1607 
1608  return 0;
1609 }
1610 
1611 void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
1612 {
1613  if (vif->tx.sring)
1614  xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1615  vif->tx.sring);
1616  if (vif->rx.sring)
1617  xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1618  vif->rx.sring);
1619 }
1620 
1621 int xen_netbk_map_frontend_rings(struct xenvif *vif,
1622  grant_ref_t tx_ring_ref,
1623  grant_ref_t rx_ring_ref)
1624 {
1625  void *addr;
1626  struct xen_netif_tx_sring *txs;
1627  struct xen_netif_rx_sring *rxs;
1628 
1629  int err = -ENOMEM;
1630 
1631  err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1632  tx_ring_ref, &addr);
1633  if (err)
1634  goto err;
1635 
1636  txs = (struct xen_netif_tx_sring *)addr;
1637  BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1638 
1639  err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1640  rx_ring_ref, &addr);
1641  if (err)
1642  goto err;
1643 
1644  rxs = (struct xen_netif_rx_sring *)addr;
1645  BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1646 
1647  vif->rx_req_cons_peek = 0;
1648 
1649  return 0;
1650 
1651 err:
1652  xen_netbk_unmap_frontend_rings(vif);
1653  return err;
1654 }
1655 
1656 static int __init netback_init(void)
1657 {
1658  int i;
1659  int rc = 0;
1660  int group;
1661 
1662  if (!xen_domain())
1663  return -ENODEV;
1664 
1665  xen_netbk_group_nr = num_online_cpus();
1666  xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
1667  if (!xen_netbk)
1668  return -ENOMEM;
1669 
1670  for (group = 0; group < xen_netbk_group_nr; group++) {
1671  struct xen_netbk *netbk = &xen_netbk[group];
1672  skb_queue_head_init(&netbk->rx_queue);
1673  skb_queue_head_init(&netbk->tx_queue);
1674 
1675  init_timer(&netbk->net_timer);
1676  netbk->net_timer.data = (unsigned long)netbk;
1677  netbk->net_timer.function = xen_netbk_alarm;
1678 
1679  netbk->pending_cons = 0;
1680  netbk->pending_prod = MAX_PENDING_REQS;
1681  for (i = 0; i < MAX_PENDING_REQS; i++)
1682  netbk->pending_ring[i] = i;
1683 
1684  init_waitqueue_head(&netbk->wq);
1685  netbk->task = kthread_create(xen_netbk_kthread,
1686  (void *)netbk,
1687  "netback/%u", group);
1688 
1689  if (IS_ERR(netbk->task)) {
1690  printk(KERN_ALERT "kthread_create() fails at netback\n");
1691  del_timer(&netbk->net_timer);
1692  rc = PTR_ERR(netbk->task);
1693  goto failed_init;
1694  }
1695 
1696  kthread_bind(netbk->task, group);
1697 
1698  INIT_LIST_HEAD(&netbk->net_schedule_list);
1699 
1700  spin_lock_init(&netbk->net_schedule_list_lock);
1701 
1702  atomic_set(&netbk->netfront_count, 0);
1703 
1704  wake_up_process(netbk->task);
1705  }
1706 
1707  rc = xenvif_xenbus_init();
1708  if (rc)
1709  goto failed_init;
1710 
1711  return 0;
1712 
1713 failed_init:
1714  while (--group >= 0) {
1715  struct xen_netbk *netbk = &xen_netbk[group];
1716  for (i = 0; i < MAX_PENDING_REQS; i++) {
1717  if (netbk->mmap_pages[i])
1718  __free_page(netbk->mmap_pages[i]);
1719  }
1720  del_timer(&netbk->net_timer);
1721  kthread_stop(netbk->task);
1722  }
1723  vfree(xen_netbk);
1724  return rc;
1725 
1726 }
1727 
1728 module_init(netback_init);
1729 
1730 MODULE_LICENSE("Dual BSD/GPL");
1731 MODULE_ALIAS("xen-backend:vif");