Linux Kernel 3.7.1
netdev.c
1 /*******************************************************************************
2 
3  Intel(R) 82576 Virtual Function Linux driver
4  Copyright(c) 2009 - 2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 
30 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/init.h>
33 #include <linux/pci.h>
34 #include <linux/vmalloc.h>
35 #include <linux/pagemap.h>
36 #include <linux/delay.h>
37 #include <linux/netdevice.h>
38 #include <linux/tcp.h>
39 #include <linux/ipv6.h>
40 #include <linux/slab.h>
41 #include <net/checksum.h>
42 #include <net/ip6_checksum.h>
43 #include <linux/mii.h>
44 #include <linux/ethtool.h>
45 #include <linux/if_vlan.h>
46 #include <linux/prefetch.h>
47 
48 #include "igbvf.h"
49 
50 #define DRV_VERSION "2.0.1-k"
51 char igbvf_driver_name[] = "igbvf";
52 const char igbvf_driver_version[] = DRV_VERSION;
53 static const char igbvf_driver_string[] =
54  "Intel(R) Gigabit Virtual Function Network Driver";
55 static const char igbvf_copyright[] =
56  "Copyright (c) 2009 - 2012 Intel Corporation.";
57 
58 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
59 static int debug = -1;
60 module_param(debug, int, 0);
61 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
62 
63 static int igbvf_poll(struct napi_struct *napi, int budget);
64 static void igbvf_reset(struct igbvf_adapter *);
65 static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
66 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);
67 
68 static struct igbvf_info igbvf_vf_info = {
69  .mac = e1000_vfadapt,
70  .flags = 0,
71  .pba = 10,
72  .init_ops = e1000_init_function_pointers_vf,
73 };
74 
75 static struct igbvf_info igbvf_i350_vf_info = {
76  .mac = e1000_vfadapt_i350,
77  .flags = 0,
78  .pba = 10,
79  .init_ops = e1000_init_function_pointers_vf,
80 };
81 
82 static const struct igbvf_info *igbvf_info_tbl[] = {
83  [board_vf] = &igbvf_vf_info,
84  [board_i350_vf] = &igbvf_i350_vf_info,
85 };
86 
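87 /**
88  * igbvf_desc_unused - calculate if we have unused descriptors
89  **/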
90 static int igbvf_desc_unused(struct igbvf_ring *ring)
91 {
92  if (ring->next_to_clean > ring->next_to_use)
93  return ring->next_to_clean - ring->next_to_use - 1;
94 
95  return ring->count + ring->next_to_clean - ring->next_to_use - 1;
96 }
97 
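98 /**
99  * igbvf_receive_skb - helper function to handle Rx indications
100  * @adapter: board private structure
101  * @netdev: pointer to netdev struct
102  * @skb: skb to indicate to stack
103  * @status: descriptor status field as written by hardware
104  **/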
105 static void igbvf_receive_skb(struct igbvf_adapter *adapter,
106  struct net_device *netdev,
107  struct sk_buff *skb,
108  u32 status, u16 vlan)
109 {
110  if (status & E1000_RXD_STAT_VP) {
111  u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
112  if (test_bit(vid, adapter->active_vlans))
113  __vlan_hwaccel_put_tag(skb, vid);
114  }
115  netif_receive_skb(skb);
116 }
117 
118 static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
119  u32 status_err, struct sk_buff *skb)
120 {
121  skb_checksum_none_assert(skb);
122 
123  /* exit early if the Ignore Checksum bit is set or checksum is disabled through ethtool */
124  if ((status_err & E1000_RXD_STAT_IXSM) ||
125  (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
126  return;
127 
128  /* TCP/UDP checksum error bit is set */
129  if (status_err &
130  (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
131  /* let the stack verify checksum errors */
132  adapter->hw_csum_err++;
133  return;
134  }
135 
136  /* It must be a TCP or UDP packet with a valid checksum */
137  if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
138  skb->ip_summed = CHECKSUM_UNNECESSARY;
139 
140  adapter->hw_csum_good++;
141 }
142 
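143 /**
144  * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
145  * @rx_ring: address of ring structure to repopulate
146  * @cleaned_count: number of buffers to repopulate
147  **/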
148 static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
149  int cleaned_count)
150 {
151  struct igbvf_adapter *adapter = rx_ring->adapter;
152  struct net_device *netdev = adapter->netdev;
153  struct pci_dev *pdev = adapter->pdev;
154  union e1000_adv_rx_desc *rx_desc;
155  struct igbvf_buffer *buffer_info;
156  struct sk_buff *skb;
157  unsigned int i;
158  int bufsz;
159 
160  i = rx_ring->next_to_use;
161  buffer_info = &rx_ring->buffer_info[i];
162 
163  if (adapter->rx_ps_hdr_size)
164  bufsz = adapter->rx_ps_hdr_size;
165  else
166  bufsz = adapter->rx_buffer_len;
167 
168  while (cleaned_count--) {
169  rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
170 
171  if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
172  if (!buffer_info->page) {
173  buffer_info->page = alloc_page(GFP_ATOMIC);
174  if (!buffer_info->page) {
175  adapter->alloc_rx_buff_failed++;
176  goto no_buffers;
177  }
178  buffer_info->page_offset = 0;
179  } else {
180  buffer_info->page_offset ^= PAGE_SIZE / 2;
181  }
182  buffer_info->page_dma =
183  dma_map_page(&pdev->dev, buffer_info->page,
184  buffer_info->page_offset,
185  PAGE_SIZE / 2,
186  DMA_FROM_DEVICE);
187  }
188 
189  if (!buffer_info->skb) {
190  skb = netdev_alloc_skb_ip_align(netdev, bufsz);
191  if (!skb) {
192  adapter->alloc_rx_buff_failed++;
193  goto no_buffers;
194  }
195 
196  buffer_info->skb = skb;
197  buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
198  bufsz,
199  DMA_FROM_DEVICE);
200  }
201  /* Refresh the desc even if buffer_addrs didn't change because
202  * each write-back erases this info. */
203  if (adapter->rx_ps_hdr_size) {
204  rx_desc->read.pkt_addr =
205  cpu_to_le64(buffer_info->page_dma);
206  rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
207  } else {
208  rx_desc->read.pkt_addr =
209  cpu_to_le64(buffer_info->dma);
210  rx_desc->read.hdr_addr = 0;
211  }
212 
213  i++;
214  if (i == rx_ring->count)
215  i = 0;
216  buffer_info = &rx_ring->buffer_info[i];
217  }
218 
219 no_buffers:
220  if (rx_ring->next_to_use != i) {
221  rx_ring->next_to_use = i;
222  if (i == 0)
223  i = (rx_ring->count - 1);
224  else
225  i--;
226 
227  /* Force memory writes to complete before letting h/w
228  * know there are new descriptors to fetch. (Only
229  * applicable for weak-ordered memory model archs,
230  * such as IA-64). */
231  wmb();
232  writel(i, adapter->hw.hw_addr + rx_ring->tail);
233  }
234 }
235 
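236 /**
237  * igbvf_clean_rx_irq - Send received data up the network stack; legacy
238  * @adapter: board private structure
239  *
240  * the return value indicates whether actual cleaning was done, there
241  * is no guarantee that everything was cleaned
242  **/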
243 static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
244  int *work_done, int work_to_do)
245 {
246  struct igbvf_ring *rx_ring = adapter->rx_ring;
247  struct net_device *netdev = adapter->netdev;
248  struct pci_dev *pdev = adapter->pdev;
249  union e1000_adv_rx_desc *rx_desc, *next_rxd;
250  struct igbvf_buffer *buffer_info, *next_buffer;
251  struct sk_buff *skb;
252  bool cleaned = false;
253  int cleaned_count = 0;
254  unsigned int total_bytes = 0, total_packets = 0;
255  unsigned int i;
256  u32 length, hlen, staterr;
257 
258  i = rx_ring->next_to_clean;
259  rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
260  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
261 
262  while (staterr & E1000_RXD_STAT_DD) {
263  if (*work_done >= work_to_do)
264  break;
265  (*work_done)++;
266  rmb(); /* read descriptor and rx_buffer_info after status DD */
267 
268  buffer_info = &rx_ring->buffer_info[i];
269 
270  /* HW will not DMA in data larger than the given buffer, even
271  * if it parses the (NFS, of course) header to be larger. In
272  * that case, it fills the header buffer and spills the rest
273  * into the page.
274  */
275  hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
276  E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
277  if (hlen > adapter->rx_ps_hdr_size)
278  hlen = adapter->rx_ps_hdr_size;
279 
280  length = le16_to_cpu(rx_desc->wb.upper.length);
281  cleaned = true;
282  cleaned_count++;
283 
284  skb = buffer_info->skb;
285  prefetch(skb->data - NET_IP_ALIGN);
286  buffer_info->skb = NULL;
287  if (!adapter->rx_ps_hdr_size) {
288  dma_unmap_single(&pdev->dev, buffer_info->dma,
289  adapter->rx_buffer_len,
290  DMA_FROM_DEVICE);
291  buffer_info->dma = 0;
292  skb_put(skb, length);
293  goto send_up;
294  }
295 
296  if (!skb_shinfo(skb)->nr_frags) {
297  dma_unmap_single(&pdev->dev, buffer_info->dma,
298  adapter->rx_ps_hdr_size,
299  DMA_FROM_DEVICE);
300  skb_put(skb, hlen);
301  }
302 
303  if (length) {
304  dma_unmap_page(&pdev->dev, buffer_info->page_dma,
305  PAGE_SIZE / 2,
306  DMA_FROM_DEVICE);
307  buffer_info->page_dma = 0;
308 
309  skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
310  buffer_info->page,
311  buffer_info->page_offset,
312  length);
313 
314  if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
315  (page_count(buffer_info->page) != 1))
316  buffer_info->page = NULL;
317  else
318  get_page(buffer_info->page);
319 
320  skb->len += length;
321  skb->data_len += length;
322  skb->truesize += PAGE_SIZE / 2;
323  }
324 send_up:
325  i++;
326  if (i == rx_ring->count)
327  i = 0;
328  next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
329  prefetch(next_rxd);
330  next_buffer = &rx_ring->buffer_info[i];
331 
332  if (!(staterr & E1000_RXD_STAT_EOP)) {
333  buffer_info->skb = next_buffer->skb;
334  buffer_info->dma = next_buffer->dma;
335  next_buffer->skb = skb;
336  next_buffer->dma = 0;
337  goto next_desc;
338  }
339 
340  if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
341  dev_kfree_skb_irq(skb);
342  goto next_desc;
343  }
344 
345  total_bytes += skb->len;
346  total_packets++;
347 
348  igbvf_rx_checksum_adv(adapter, staterr, skb);
349 
350  skb->protocol = eth_type_trans(skb, netdev);
351 
352  igbvf_receive_skb(adapter, netdev, skb, staterr,
353  rx_desc->wb.upper.vlan);
354 
355 next_desc:
356  rx_desc->wb.upper.status_error = 0;
357 
358  /* return some buffers to hardware, one at a time is too slow */
359  if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
360  igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
361  cleaned_count = 0;
362  }
363 
364  /* use prefetched values */
365  rx_desc = next_rxd;
366  buffer_info = next_buffer;
367 
368  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
369  }
370 
371  rx_ring->next_to_clean = i;
372  cleaned_count = igbvf_desc_unused(rx_ring);
373 
374  if (cleaned_count)
375  igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
376 
377  adapter->total_rx_packets += total_packets;
378  adapter->total_rx_bytes += total_bytes;
379  adapter->net_stats.rx_bytes += total_bytes;
380  adapter->net_stats.rx_packets += total_packets;
381  return cleaned;
382 }
383 
384 static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
385  struct igbvf_buffer *buffer_info)
386 {
387  if (buffer_info->dma) {
388  if (buffer_info->mapped_as_page)
389  dma_unmap_page(&adapter->pdev->dev,
390  buffer_info->dma,
391  buffer_info->length,
392  DMA_TO_DEVICE);
393  else
394  dma_unmap_single(&adapter->pdev->dev,
395  buffer_info->dma,
396  buffer_info->length,
397  DMA_TO_DEVICE);
398  buffer_info->dma = 0;
399  }
400  if (buffer_info->skb) {
401  dev_kfree_skb_any(buffer_info->skb);
402  buffer_info->skb = NULL;
403  }
404  buffer_info->time_stamp = 0;
405 }
406 
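407 /**
408  * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
409  * @adapter: board private structure
410  * @tx_ring: ring to be initialized
411  * Return 0 on success, negative on failure
412  **/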
413 int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
414  struct igbvf_ring *tx_ring)
415 {
416  struct pci_dev *pdev = adapter->pdev;
417  int size;
418 
419  size = sizeof(struct igbvf_buffer) * tx_ring->count;
420  tx_ring->buffer_info = vzalloc(size);
421  if (!tx_ring->buffer_info)
422  goto err;
423 
424  /* round up to nearest 4K */
425  tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
426  tx_ring->size = ALIGN(tx_ring->size, 4096);
427 
428  tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
429  &tx_ring->dma, GFP_KERNEL);
430 
431  if (!tx_ring->desc)
432  goto err;
433 
434  tx_ring->adapter = adapter;
435  tx_ring->next_to_use = 0;
436  tx_ring->next_to_clean = 0;
437 
438  return 0;
439 err:
440  vfree(tx_ring->buffer_info);
441  dev_err(&adapter->pdev->dev,
442  "Unable to allocate memory for the transmit descriptor ring\n");
443  return -ENOMEM;
444 }
445 
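446 /**
447  * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
448  * @adapter: board private structure
449  * @rx_ring: ring to be initialized
450  * Returns 0 on success, negative on failure
451  **/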
452 int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
453  struct igbvf_ring *rx_ring)
454 {
455  struct pci_dev *pdev = adapter->pdev;
456  int size, desc_len;
457 
458  size = sizeof(struct igbvf_buffer) * rx_ring->count;
459  rx_ring->buffer_info = vzalloc(size);
460  if (!rx_ring->buffer_info)
461  goto err;
462 
463  desc_len = sizeof(union e1000_adv_rx_desc);
464 
465  /* Round up to nearest 4K */
466  rx_ring->size = rx_ring->count * desc_len;
467  rx_ring->size = ALIGN(rx_ring->size, 4096);
468 
469  rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
470  &rx_ring->dma, GFP_KERNEL);
471 
472  if (!rx_ring->desc)
473  goto err;
474 
475  rx_ring->next_to_clean = 0;
476  rx_ring->next_to_use = 0;
477 
478  rx_ring->adapter = adapter;
479 
480  return 0;
481 
482 err:
483  vfree(rx_ring->buffer_info);
484  rx_ring->buffer_info = NULL;
485  dev_err(&adapter->pdev->dev,
486  "Unable to allocate memory for the receive descriptor ring\n");
487  return -ENOMEM;
488 }
489 
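490 /**
491  * igbvf_clean_tx_ring - Free Tx Buffers
492  * @tx_ring: ring to be cleaned
493  **/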
494 static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
495 {
496  struct igbvf_adapter *adapter = tx_ring->adapter;
497  struct igbvf_buffer *buffer_info;
498  unsigned long size;
499  unsigned int i;
500 
501  if (!tx_ring->buffer_info)
502  return;
503 
504  /* Free all the Tx ring sk_buffs */
505  for (i = 0; i < tx_ring->count; i++) {
506  buffer_info = &tx_ring->buffer_info[i];
507  igbvf_put_txbuf(adapter, buffer_info);
508  }
509 
510  size = sizeof(struct igbvf_buffer) * tx_ring->count;
511  memset(tx_ring->buffer_info, 0, size);
512 
513  /* Zero out the descriptor ring */
514  memset(tx_ring->desc, 0, tx_ring->size);
515 
516  tx_ring->next_to_use = 0;
517  tx_ring->next_to_clean = 0;
518 
519  writel(0, adapter->hw.hw_addr + tx_ring->head);
520  writel(0, adapter->hw.hw_addr + tx_ring->tail);
521 }
522 
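523 /**
524  * igbvf_free_tx_resources - Free Tx Resources per Queue
525  * @tx_ring: ring to free resources from
526  *
527  * Free all transmit software resources
528  **/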
529 void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
530 {
531  struct pci_dev *pdev = tx_ring->adapter->pdev;
532 
533  igbvf_clean_tx_ring(tx_ring);
534 
535  vfree(tx_ring->buffer_info);
536  tx_ring->buffer_info = NULL;
537 
538  dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
539  tx_ring->dma);
540 
541  tx_ring->desc = NULL;
542 }
543 
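544 /**
545  * igbvf_clean_rx_ring - Free Rx Buffers per Queue
546  * @rx_ring: ring structure pointer to free buffers from
547  **/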
548 static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
549 {
550  struct igbvf_adapter *adapter = rx_ring->adapter;
551  struct igbvf_buffer *buffer_info;
552  struct pci_dev *pdev = adapter->pdev;
553  unsigned long size;
554  unsigned int i;
555 
556  if (!rx_ring->buffer_info)
557  return;
558 
559  /* Free all the Rx ring sk_buffs */
560  for (i = 0; i < rx_ring->count; i++) {
561  buffer_info = &rx_ring->buffer_info[i];
562  if (buffer_info->dma) {
563  if (adapter->rx_ps_hdr_size) {
564  dma_unmap_single(&pdev->dev, buffer_info->dma,
565  adapter->rx_ps_hdr_size,
566  DMA_FROM_DEVICE);
567  } else {
568  dma_unmap_single(&pdev->dev, buffer_info->dma,
569  adapter->rx_buffer_len,
570  DMA_FROM_DEVICE);
571  }
572  buffer_info->dma = 0;
573  }
574 
575  if (buffer_info->skb) {
576  dev_kfree_skb(buffer_info->skb);
577  buffer_info->skb = NULL;
578  }
579 
580  if (buffer_info->page) {
581  if (buffer_info->page_dma)
582  dma_unmap_page(&pdev->dev,
583  buffer_info->page_dma,
584  PAGE_SIZE / 2,
585  DMA_FROM_DEVICE);
586  put_page(buffer_info->page);
587  buffer_info->page = NULL;
588  buffer_info->page_dma = 0;
589  buffer_info->page_offset = 0;
590  }
591  }
592 
593  size = sizeof(struct igbvf_buffer) * rx_ring->count;
594  memset(rx_ring->buffer_info, 0, size);
595 
596  /* Zero out the descriptor ring */
597  memset(rx_ring->desc, 0, rx_ring->size);
598 
599  rx_ring->next_to_clean = 0;
600  rx_ring->next_to_use = 0;
601 
602  writel(0, adapter->hw.hw_addr + rx_ring->head);
603  writel(0, adapter->hw.hw_addr + rx_ring->tail);
604 }
605 
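606 /**
607  * igbvf_free_rx_resources - Free Rx Resources
608  * @rx_ring: ring to clean the resources from
609  *
610  * Free all receive software resources
611  **/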
613 void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
614 {
615  struct pci_dev *pdev = rx_ring->adapter->pdev;
616 
617  igbvf_clean_rx_ring(rx_ring);
618 
619  vfree(rx_ring->buffer_info);
620  rx_ring->buffer_info = NULL;
621 
622  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
623  rx_ring->dma);
624  rx_ring->desc = NULL;
625 }
626 
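627 /**
628  * igbvf_update_itr - update the dynamic ITR value based on statistics
629  * @adapter: pointer to adapter
630  * @itr_setting: current adapter->itr
631  * @packets: the number of packets during this measurement interval
632  * @bytes: the number of bytes during this measurement interval
633  *
634  * Stores a new ITR value based on packets and byte counts during the last
635  * interrupt.  The advantage of per interrupt computation is faster updates
636  * and more accurate ITR for the current traffic pattern.  Constants in this
637  * function were computed based on theoretical maximum wire speed and
638  * thresholds were set based on testing data as well as attempting to
639  * minimize response time while increasing bulk throughput.
640  **/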
642 static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
643  enum latency_range itr_setting,
644  int packets, int bytes)
645 {
646  enum latency_range retval = itr_setting;
647 
648  if (packets == 0)
649  goto update_itr_done;
650 
651  switch (itr_setting) {
652  case lowest_latency:
653  /* handle TSO and jumbo frames */
654  if (bytes/packets > 8000)
655  retval = bulk_latency;
656  else if ((packets < 5) && (bytes > 512))
657  retval = low_latency;
658  break;
659  case low_latency: /* 50 usec aka 20000 ints/s */
660  if (bytes > 10000) {
661  /* this if handles the TSO accounting */
662  if (bytes/packets > 8000)
663  retval = bulk_latency;
664  else if ((packets < 10) || ((bytes/packets) > 1200))
665  retval = bulk_latency;
666  else if ((packets > 35))
667  retval = lowest_latency;
668  } else if (bytes/packets > 2000) {
669  retval = bulk_latency;
670  } else if (packets <= 2 && bytes < 512) {
671  retval = lowest_latency;
672  }
673  break;
674  case bulk_latency: /* 250 usec aka 4000 ints/s */
675  if (bytes > 25000) {
676  if (packets > 35)
677  retval = low_latency;
678  } else if (bytes < 6000) {
679  retval = low_latency;
680  }
681  break;
682  default:
683  break;
684  }
685 
686 update_itr_done:
687  return retval;
688 }
689 
690 static int igbvf_range_to_itr(enum latency_range current_range)
691 {
692  int new_itr;
693 
694  switch (current_range) {
695  /* counts and packets in update_itr are dependent on these numbers */
696  case lowest_latency:
697  new_itr = IGBVF_70K_ITR;
698  break;
699  case low_latency:
700  new_itr = IGBVF_20K_ITR;
701  break;
702  case bulk_latency:
703  new_itr = IGBVF_4K_ITR;
704  break;
705  default:
706  new_itr = IGBVF_START_ITR;
707  break;
708  }
709  return new_itr;
710 }
711 
712 static void igbvf_set_itr(struct igbvf_adapter *adapter)
713 {
714  u32 new_itr;
715 
716  adapter->tx_ring->itr_range =
717  igbvf_update_itr(adapter,
718  adapter->tx_ring->itr_val,
719  adapter->total_tx_packets,
720  adapter->total_tx_bytes);
721 
722  /* conservative mode (itr 3) eliminates the lowest_latency setting */
723  if (adapter->requested_itr == 3 &&
724  adapter->tx_ring->itr_range == lowest_latency)
725  adapter->tx_ring->itr_range = low_latency;
726 
727  new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);
728 
729 
730  if (new_itr != adapter->tx_ring->itr_val) {
731  u32 current_itr = adapter->tx_ring->itr_val;
732  /*
733  * this attempts to bias the interrupt rate towards Bulk
734  * by adding intermediate steps when interrupt rate is
735  * increasing
736  */
737  new_itr = new_itr > current_itr ?
738  min(current_itr + (new_itr >> 2), new_itr) :
739  new_itr;
740  adapter->tx_ring->itr_val = new_itr;
741 
742  adapter->tx_ring->set_itr = 1;
743  }
744 
745  adapter->rx_ring->itr_range =
746  igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
747  adapter->total_rx_packets,
748  adapter->total_rx_bytes);
749  if (adapter->requested_itr == 3 &&
750  adapter->rx_ring->itr_range == lowest_latency)
751  adapter->rx_ring->itr_range = low_latency;
752 
753  new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);
754 
755  if (new_itr != adapter->rx_ring->itr_val) {
756  u32 current_itr = adapter->rx_ring->itr_val;
757  new_itr = new_itr > current_itr ?
758  min(current_itr + (new_itr >> 2), new_itr) :
759  new_itr;
760  adapter->rx_ring->itr_val = new_itr;
761 
762  adapter->rx_ring->set_itr = 1;
763  }
764 }
765 
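766 /**
767  * igbvf_clean_tx_irq - Reclaim resources after transmit completes
768  * @tx_ring: ring structure to clean descriptors from
769  *
770  * returns true if ring is completely cleaned
771  **/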
772 static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
773 {
774  struct igbvf_adapter *adapter = tx_ring->adapter;
775  struct net_device *netdev = adapter->netdev;
776  struct igbvf_buffer *buffer_info;
777  struct sk_buff *skb;
778  union e1000_adv_tx_desc *tx_desc, *eop_desc;
779  unsigned int total_bytes = 0, total_packets = 0;
780  unsigned int i, eop, count = 0;
781  bool cleaned = false;
782 
783  i = tx_ring->next_to_clean;
784  eop = tx_ring->buffer_info[i].next_to_watch;
785  eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
786 
787  while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
788  (count < tx_ring->count)) {
789  rmb(); /* read buffer_info after eop_desc status */
790  for (cleaned = false; !cleaned; count++) {
791  tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
792  buffer_info = &tx_ring->buffer_info[i];
793  cleaned = (i == eop);
794  skb = buffer_info->skb;
795 
796  if (skb) {
797  unsigned int segs, bytecount;
798 
799  /* gso_segs is currently only valid for tcp */
800  segs = skb_shinfo(skb)->gso_segs ?: 1;
801  /* multiply data chunks by size of headers */
802  bytecount = ((segs - 1) * skb_headlen(skb)) +
803  skb->len;
804  total_packets += segs;
805  total_bytes += bytecount;
806  }
807 
808  igbvf_put_txbuf(adapter, buffer_info);
809  tx_desc->wb.status = 0;
810 
811  i++;
812  if (i == tx_ring->count)
813  i = 0;
814  }
815  eop = tx_ring->buffer_info[i].next_to_watch;
816  eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
817  }
818 
819  tx_ring->next_to_clean = i;
820 
821  if (unlikely(count &&
822  netif_carrier_ok(netdev) &&
823  igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
824  /* Make sure that anybody stopping the queue after this
825  * sees the new next_to_clean.
826  */
827  smp_mb();
828  if (netif_queue_stopped(netdev) &&
829  !(test_bit(__IGBVF_DOWN, &adapter->state))) {
830  netif_wake_queue(netdev);
831  ++adapter->restart_queue;
832  }
833  }
834 
835  adapter->net_stats.tx_bytes += total_bytes;
836  adapter->net_stats.tx_packets += total_packets;
837  return count < tx_ring->count;
838 }
839 
840 static irqreturn_t igbvf_msix_other(int irq, void *data)
841 {
842  struct net_device *netdev = data;
843  struct igbvf_adapter *adapter = netdev_priv(netdev);
844  struct e1000_hw *hw = &adapter->hw;
845 
846  adapter->int_counter1++;
847 
848  netif_carrier_off(netdev);
849  hw->mac.get_link_status = 1;
850  if (!test_bit(__IGBVF_DOWN, &adapter->state))
851  mod_timer(&adapter->watchdog_timer, jiffies + 1);
852 
853  ew32(EIMS, adapter->eims_other);
854 
855  return IRQ_HANDLED;
856 }
857 
858 static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
859 {
860  struct net_device *netdev = data;
861  struct igbvf_adapter *adapter = netdev_priv(netdev);
862  struct e1000_hw *hw = &adapter->hw;
863  struct igbvf_ring *tx_ring = adapter->tx_ring;
864 
865  if (tx_ring->set_itr) {
866  writel(tx_ring->itr_val,
867  adapter->hw.hw_addr + tx_ring->itr_register);
868  adapter->tx_ring->set_itr = 0;
869  }
870 
871  adapter->total_tx_bytes = 0;
872  adapter->total_tx_packets = 0;
873 
874  /* auto mask will automatically reenable the interrupt when we write
875  * EICS */
876  if (!igbvf_clean_tx_irq(tx_ring))
877  /* Ring was not completely cleaned, so fire another interrupt */
878  ew32(EICS, tx_ring->eims_value);
879  else
880  ew32(EIMS, tx_ring->eims_value);
881 
882  return IRQ_HANDLED;
883 }
884 
885 static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
886 {
887  struct net_device *netdev = data;
888  struct igbvf_adapter *adapter = netdev_priv(netdev);
889 
890  adapter->int_counter0++;
891 
892  /* Write the ITR value calculated at the end of the
893  * previous interrupt.
894  */
895  if (adapter->rx_ring->set_itr) {
896  writel(adapter->rx_ring->itr_val,
897  adapter->hw.hw_addr + adapter->rx_ring->itr_register);
898  adapter->rx_ring->set_itr = 0;
899  }
900 
901  if (napi_schedule_prep(&adapter->rx_ring->napi)) {
902  adapter->total_rx_bytes = 0;
903  adapter->total_rx_packets = 0;
904  __napi_schedule(&adapter->rx_ring->napi);
905  }
906 
907  return IRQ_HANDLED;
908 }
909 
910 #define IGBVF_NO_QUEUE -1
911 
912 static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
913  int tx_queue, int msix_vector)
914 {
915  struct e1000_hw *hw = &adapter->hw;
916  u32 ivar, index;
917 
918  /* 82576 uses a table-based method for assigning vectors.
919  Each queue has a single entry in the table to which we write
920  a vector number along with a "valid" bit. Sadly, the layout
921  of the table is somewhat counterintuitive. */
922  if (rx_queue > IGBVF_NO_QUEUE) {
923  index = (rx_queue >> 1);
924  ivar = array_er32(IVAR0, index);
925  if (rx_queue & 0x1) {
926  /* vector goes into third byte of register */
927  ivar = ivar & 0xFF00FFFF;
928  ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
929  } else {
930  /* vector goes into low byte of register */
931  ivar = ivar & 0xFFFFFF00;
932  ivar |= msix_vector | E1000_IVAR_VALID;
933  }
934  adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
935  array_ew32(IVAR0, index, ivar);
936  }
937  if (tx_queue > IGBVF_NO_QUEUE) {
938  index = (tx_queue >> 1);
939  ivar = array_er32(IVAR0, index);
940  if (tx_queue & 0x1) {
941  /* vector goes into high byte of register */
942  ivar = ivar & 0x00FFFFFF;
943  ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
944  } else {
945  /* vector goes into second byte of register */
946  ivar = ivar & 0xFFFF00FF;
947  ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
948  }
949  adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
950  array_ew32(IVAR0, index, ivar);
951  }
952 }
953 
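954 /**
955  * igbvf_configure_msix - Configure MSI-X hardware
956  * @adapter: board private structure
957  *
958  * Sets up the hardware to properly generate MSI-X interrupts.
959  **/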
960 static void igbvf_configure_msix(struct igbvf_adapter *adapter)
961 {
962  u32 tmp;
963  struct e1000_hw *hw = &adapter->hw;
964  struct igbvf_ring *tx_ring = adapter->tx_ring;
965  struct igbvf_ring *rx_ring = adapter->rx_ring;
966  int vector = 0;
967 
968  adapter->eims_enable_mask = 0;
969 
970  igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
971  adapter->eims_enable_mask |= tx_ring->eims_value;
972  writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
973  igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
974  adapter->eims_enable_mask |= rx_ring->eims_value;
975  writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
976 
977  /* set vector for other causes, i.e. link changes */
978 
979  tmp = (vector++ | E1000_IVAR_VALID);
980 
981  ew32(IVAR_MISC, tmp);
982 
983  adapter->eims_enable_mask = (1 << (vector)) - 1;
984  adapter->eims_other = 1 << (vector - 1);
985  e1e_flush();
986 }
987 
988 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
989 {
990  if (adapter->msix_entries) {
991  pci_disable_msix(adapter->pdev);
992  kfree(adapter->msix_entries);
993  adapter->msix_entries = NULL;
994  }
995 }
996 
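997 /**
998  * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
999  * @adapter: board private structure
1000  *
1001  * Attempt to configure interrupts using the best available capabilities.
1002  **/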
1003 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1004 {
1005  int err = -ENOMEM;
1006  int i;
1007 
1008  /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */
1009  adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1010  GFP_KERNEL);
1011  if (adapter->msix_entries) {
1012  for (i = 0; i < 3; i++)
1013  adapter->msix_entries[i].entry = i;
1014 
1015  err = pci_enable_msix(adapter->pdev,
1016  adapter->msix_entries, 3);
1017  }
1018 
1019  if (err) {
1020  /* MSI-X failed */
1021  dev_err(&adapter->pdev->dev,
1022  "Failed to initialize MSI-X interrupts.\n");
1023  igbvf_reset_interrupt_capability(adapter);
1024  }
1025 }
1026 
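1027 /**
1028  * igbvf_request_msix - Initialize MSI-X interrupts
1029  * @adapter: board private structure
1030  *
1031  * Allocates MSI-X vectors and requests interrupts from the kernel.
1032  **/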
1033 static int igbvf_request_msix(struct igbvf_adapter *adapter)
1034 {
1035  struct net_device *netdev = adapter->netdev;
1036  int err = 0, vector = 0;
1037 
1038  if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1039  sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1040  sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1041  } else {
1042  memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1043  memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1044  }
1045 
1046  err = request_irq(adapter->msix_entries[vector].vector,
1047  igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1048  netdev);
1049  if (err)
1050  goto out;
1051 
1052  adapter->tx_ring->itr_register = E1000_EITR(vector);
1053  adapter->tx_ring->itr_val = adapter->current_itr;
1054  vector++;
1055 
1056  err = request_irq(adapter->msix_entries[vector].vector,
1057  igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1058  netdev);
1059  if (err)
1060  goto out;
1061 
1062  adapter->rx_ring->itr_register = E1000_EITR(vector);
1063  adapter->rx_ring->itr_val = adapter->current_itr;
1064  vector++;
1065 
1066  err = request_irq(adapter->msix_entries[vector].vector,
1067  igbvf_msix_other, 0, netdev->name, netdev);
1068  if (err)
1069  goto out;
1070 
1071  igbvf_configure_msix(adapter);
1072  return 0;
1073 out:
1074  return err;
1075 }
1076 
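1077 /**
1078  * igbvf_alloc_queues - Allocate memory for all rings
1079  * @adapter: board private structure to initialize
1080  **/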
1081 static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1082 {
1083  struct net_device *netdev = adapter->netdev;
1084 
1085  adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1086  if (!adapter->tx_ring)
1087  return -ENOMEM;
1088 
1089  adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1090  if (!adapter->rx_ring) {
1091  kfree(adapter->tx_ring);
1092  return -ENOMEM;
1093  }
1094 
1095  netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1096 
1097  return 0;
1098 }
1099 
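1100 /**
1101  * igbvf_request_irq - initialize interrupts
1102  * @adapter: board private structure
1103  *
1104  * Attempts to configure interrupts using the best available capabilities.
1105  **/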
1106 static int igbvf_request_irq(struct igbvf_adapter *adapter)
1107 {
1108  int err = -1;
1109 
1110  /* igbvf supports msi-x only */
1111  if (adapter->msix_entries)
1112  err = igbvf_request_msix(adapter);
1113 
1114  if (!err)
1115  return err;
1116 
1117  dev_err(&adapter->pdev->dev,
1118  "Unable to allocate interrupt, Error: %d\n", err);
1119 
1120  return err;
1121 }
1122 
1123 static void igbvf_free_irq(struct igbvf_adapter *adapter)
1124 {
1125  struct net_device *netdev = adapter->netdev;
1126  int vector;
1127 
1128  if (adapter->msix_entries) {
1129  for (vector = 0; vector < 3; vector++)
1130  free_irq(adapter->msix_entries[vector].vector, netdev);
1131  }
1132 }
1133 
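1134 /**
1135  * igbvf_irq_disable - Mask off interrupt generation on the NIC
1136  **/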
1137 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1138 {
1139  struct e1000_hw *hw = &adapter->hw;
1140 
1141  ew32(EIMC, ~0);
1142 
1143  if (adapter->msix_entries)
1144  ew32(EIAC, 0);
1145 }
1146 
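1147 /**
1148  * igbvf_irq_enable - Enable default interrupt generation settings
1149  **/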
1150 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1151 {
1152  struct e1000_hw *hw = &adapter->hw;
1153 
1154  ew32(EIAC, adapter->eims_enable_mask);
1155  ew32(EIAM, adapter->eims_enable_mask);
1156  ew32(EIMS, adapter->eims_enable_mask);
1157 }
1158 
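1159 /**
1160  * igbvf_poll - NAPI Rx polling callback
1161  * @napi: struct associated with this polling callback
1162  * @budget: amount of packets driver is allowed to process this poll
1163  **/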
1164 static int igbvf_poll(struct napi_struct *napi, int budget)
1165 {
1166  struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1167  struct igbvf_adapter *adapter = rx_ring->adapter;
1168  struct e1000_hw *hw = &adapter->hw;
1169  int work_done = 0;
1170 
1171  igbvf_clean_rx_irq(adapter, &work_done, budget);
1172 
1173  /* If not enough Rx work done, exit the polling mode */
1174  if (work_done < budget) {
1175  napi_complete(napi);
1176 
1177  if (adapter->requested_itr & 3)
1178  igbvf_set_itr(adapter);
1179 
1180  if (!test_bit(__IGBVF_DOWN, &adapter->state))
1181  ew32(EIMS, adapter->rx_ring->eims_value);
1182  }
1183 
1184  return work_done;
1185 }
1186 
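1187 /**
1188  * igbvf_set_rlpml - set receive large packet maximum length
1189  * @adapter: board private structure
1190  *
1191  * Configure the maximum size of packets that will be received
1192  **/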
1193 static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1194 {
1195  int max_frame_size;
1196  struct e1000_hw *hw = &adapter->hw;
1197 
1198  max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
1199  e1000_rlpml_set_vf(hw, max_frame_size);
1200 }
1201 
1202 static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1203 {
1204  struct igbvf_adapter *adapter = netdev_priv(netdev);
1205  struct e1000_hw *hw = &adapter->hw;
1206 
1207  if (hw->mac.ops.set_vfta(hw, vid, true)) {
1208  dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1209  return -EINVAL;
1210  }
1211  set_bit(vid, adapter->active_vlans);
1212  return 0;
1213 }
1214 
1215 static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1216 {
1217  struct igbvf_adapter *adapter = netdev_priv(netdev);
1218  struct e1000_hw *hw = &adapter->hw;
1219 
1220  if (hw->mac.ops.set_vfta(hw, vid, false)) {
1221  dev_err(&adapter->pdev->dev,
1222  "Failed to remove vlan id %d\n", vid);
1223  return -EINVAL;
1224  }
1225  clear_bit(vid, adapter->active_vlans);
1226  return 0;
1227 }
1228 
1229 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1230 {
1231  u16 vid;
1232 
1233  for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1234  igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1235 }
1236 
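1237 /**
1238  * igbvf_configure_tx - Configure Transmit Unit after Reset
1239  * @adapter: board private structure
1240  *
1241  * Configure the Tx unit of the MAC after a reset.
1242  **/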
1243 static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1244 {
1245  struct e1000_hw *hw = &adapter->hw;
1246  struct igbvf_ring *tx_ring = adapter->tx_ring;
1247  u64 tdba;
1248  u32 txdctl, dca_txctrl;
1249 
1250  /* disable transmits */
1251  txdctl = er32(TXDCTL(0));
1252  ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1253  e1e_flush();
1254  msleep(10);
1255 
1256  /* Setup the HW Tx Head and Tail descriptor pointers */
1257  ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1258  tdba = tx_ring->dma;
1259  ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1260  ew32(TDBAH(0), (tdba >> 32));
1261  ew32(TDH(0), 0);
1262  ew32(TDT(0), 0);
1263  tx_ring->head = E1000_TDH(0);
1264  tx_ring->tail = E1000_TDT(0);
1265 
1266  /* Turn off Relaxed Ordering on head write-backs. The writebacks
1267  * MUST be delivered in order or it will completely screw up
1268  * our bookkeeping.
1269  */
1270  dca_txctrl = er32(DCA_TXCTRL(0));
1271  dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1272  ew32(DCA_TXCTRL(0), dca_txctrl);
1273 
1274  /* enable transmits */
1275  txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1276  ew32(TXDCTL(0), txdctl);
1277 
1278  /* Setup Transmit Descriptor Settings for eop descriptor */
1279  adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1280 
1281  /* enable Report Status bit */
1282  adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1283 }
1284 
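1285 /**
1286  * igbvf_setup_srrctl - configure the receive control registers
1287  * @adapter: board private structure
1288  **/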
1289 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1290 {
1291  struct e1000_hw *hw = &adapter->hw;
1292  u32 srrctl = 0;
1293 
1294  srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1295  E1000_SRRCTL_BSIZEHDR_MASK |
1296  E1000_SRRCTL_BSIZEPKT_MASK);
1297 
1298  /* Enable queue drop to avoid head of line blocking */
1299  srrctl |= E1000_SRRCTL_DROP_EN;
1300 
1301  /* Setup buffer sizes */
1302  srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1303  E1000_SRRCTL_BSIZEPKT_SHIFT;
1304 
1305  if (adapter->rx_buffer_len < 2048) {
1306  adapter->rx_ps_hdr_size = 0;
1307  srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1308  } else {
1309  adapter->rx_ps_hdr_size = 128;
1310  srrctl |= adapter->rx_ps_hdr_size <<
1311  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1312  srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1313  }
1314 
1315  ew32(SRRCTL(0), srrctl);
1316 }
1317 
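1318 /**
1319  * igbvf_configure_rx - Configure Receive Unit after Reset
1320  * @adapter: board private structure
1321  *
1322  * Configure the Rx unit of the MAC after a reset.
1323  **/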
1324 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1325 {
1326  struct e1000_hw *hw = &adapter->hw;
1327  struct igbvf_ring *rx_ring = adapter->rx_ring;
1328  u64 rdba;
1329  u32 rdlen, rxdctl;
1330 
1331  /* disable receives */
1332  rxdctl = er32(RXDCTL(0));
1333  ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1334  e1e_flush();
1335  msleep(10);
1336 
1337  rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1338 
1339  /*
1340  * Setup the HW Rx Head and Tail Descriptor Pointers and
1341  * the Base and Length of the Rx Descriptor Ring
1342  */
1343  rdba = rx_ring->dma;
1344  ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1345  ew32(RDBAH(0), (rdba >> 32));
1346  ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1347  rx_ring->head = E1000_RDH(0);
1348  rx_ring->tail = E1000_RDT(0);
1349  ew32(RDH(0), 0);
1350  ew32(RDT(0), 0);
1351 
1352  rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1353  rxdctl &= 0xFFF00000;
1354  rxdctl |= IGBVF_RX_PTHRESH;
1355  rxdctl |= IGBVF_RX_HTHRESH << 8;
1356  rxdctl |= IGBVF_RX_WTHRESH << 16;
1357 
1358  igbvf_set_rlpml(adapter);
1359 
1360  /* enable receives */
1361  ew32(RXDCTL(0), rxdctl);
1362 }
1363 
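1364 /**
1365  * igbvf_set_multi - Multicast and Promiscuous mode set
1366  * @netdev: network interface device structure
1367  *
1368  * The set_multi entry point is called whenever the multicast address
1369  * list or the network interface flags are updated.  This routine is
1370  * responsible for configuring the hardware for proper multicast,
1371  * promiscuous mode, and proper unicast mode.
1372  **/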
1373 static void igbvf_set_multi(struct net_device *netdev)
1374 {
1375  struct igbvf_adapter *adapter = netdev_priv(netdev);
1376  struct e1000_hw *hw = &adapter->hw;
1377  struct netdev_hw_addr *ha;
1378  u8 *mta_list = NULL;
1379  int i;
1380 
1381  if (!netdev_mc_empty(netdev)) {
1382  mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
1383  if (!mta_list) {
1384  dev_err(&adapter->pdev->dev,
1385  "failed to allocate multicast filter list\n");
1386  return;
1387  }
1388  }
1389 
1390  /* prepare a packed array of only addresses. */
1391  i = 0;
1392  netdev_for_each_mc_addr(ha, netdev)
1393  memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1394 
1395  hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1396  kfree(mta_list);
1397 }
1398 
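1399 /**
1400  * igbvf_configure - configure the hardware for Rx and Tx
1401  * @adapter: private board structure
1402  **/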
1403 static void igbvf_configure(struct igbvf_adapter *adapter)
1404 {
1405  igbvf_set_multi(adapter->netdev);
1406 
1407  igbvf_restore_vlan(adapter);
1408 
1409  igbvf_configure_tx(adapter);
1410  igbvf_setup_srrctl(adapter);
1411  igbvf_configure_rx(adapter);
1412  igbvf_alloc_rx_buffers(adapter->rx_ring,
1413  igbvf_desc_unused(adapter->rx_ring));
1414 }
1415 
1416 /* igbvf_reset - bring the hardware into a known good state
1417  *
1418  * This function boots the hardware and enables some settings that
1419  * require a configuration cycle of the hardware - those cannot be
1420  * set/changed during runtime. After reset the device needs to be
1421  * properly configured for Rx, Tx etc.
1422  */
1423 static void igbvf_reset(struct igbvf_adapter *adapter)
1424 {
1425  struct e1000_mac_info *mac = &adapter->hw.mac;
1426  struct net_device *netdev = adapter->netdev;
1427  struct e1000_hw *hw = &adapter->hw;
1428 
1429  /* Allow time for pending master requests to run */
1430  if (mac->ops.reset_hw(hw))
1431  dev_err(&adapter->pdev->dev, "PF still resetting\n");
1432 
1433  mac->ops.init_hw(hw);
1434 
1435  if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1436  memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1437  netdev->addr_len);
1438  memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1439  netdev->addr_len);
1440  }
1441 
1442  adapter->last_reset = jiffies;
1443 }
1444 
1445 int igbvf_up(struct igbvf_adapter *adapter)
1446 {
1447  struct e1000_hw *hw = &adapter->hw;
1448 
1449  /* hardware has been reset, we need to reload some things */
1450  igbvf_configure(adapter);
1451 
1452  clear_bit(__IGBVF_DOWN, &adapter->state);
1453 
1454  napi_enable(&adapter->rx_ring->napi);
1455  if (adapter->msix_entries)
1456  igbvf_configure_msix(adapter);
1457 
1458  /* Clear any pending interrupts. */
1459  er32(EICR);
1460  igbvf_irq_enable(adapter);
1461 
1462  /* start the watchdog */
1463  hw->mac.get_link_status = 1;
1464  mod_timer(&adapter->watchdog_timer, jiffies + 1);
1465 
1466 
1467  return 0;
1468 }
1469 
1470 void igbvf_down(struct igbvf_adapter *adapter)
1471 {
1472  struct net_device *netdev = adapter->netdev;
1473  struct e1000_hw *hw = &adapter->hw;
1474  u32 rxdctl, txdctl;
1475 
1476  /*
1477  * signal that we're down so the interrupt handler does not
1478  * reschedule our watchdog timer
1479  */
1480  set_bit(__IGBVF_DOWN, &adapter->state);
1481 
1482  /* disable receives in the hardware */
1483  rxdctl = er32(RXDCTL(0));
1484  ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1485 
1486  netif_stop_queue(netdev);
1487 
1488  /* disable transmits in the hardware */
1489  txdctl = er32(TXDCTL(0));
1490  ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1491 
1492  /* flush both disables and wait for them to finish */
1493  e1e_flush();
1494  msleep(10);
1495 
1496  napi_disable(&adapter->rx_ring->napi);
1497 
1498  igbvf_irq_disable(adapter);
1499 
1500  del_timer_sync(&adapter->watchdog_timer);
1501 
1502  netif_carrier_off(netdev);
1503 
1504  /* record the stats before reset*/
1505  igbvf_update_stats(adapter);
1506 
1507  adapter->link_speed = 0;
1508  adapter->link_duplex = 0;
1509 
1510  igbvf_reset(adapter);
1511  igbvf_clean_tx_ring(adapter->tx_ring);
1512  igbvf_clean_rx_ring(adapter->rx_ring);
1513 }
1514 
1515 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1516 {
1517  might_sleep();
1518  while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1519  msleep(1);
1520  igbvf_down(adapter);
1521  igbvf_up(adapter);
1522  clear_bit(__IGBVF_RESETTING, &adapter->state);
1523 }
1524 
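1525 /**
1526  * igbvf_sw_init - Initialize general software structures
1527  * @adapter: board private structure to initialize
1528  *
1529  * igbvf_sw_init initializes the Adapter private data structure.
1530  * Fields are initialized based on PCI device information and
1531  * OS network device settings (MTU size).
1532  **/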
1533 static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1534 {
1535  struct net_device *netdev = adapter->netdev;
1536  s32 rc;
1537 
1538  adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1539  adapter->rx_ps_hdr_size = 0;
1540  adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1541  adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1542 
1543  adapter->tx_int_delay = 8;
1544  adapter->tx_abs_int_delay = 32;
1545  adapter->rx_int_delay = 0;
1546  adapter->rx_abs_int_delay = 8;
1547  adapter->requested_itr = 3;
1548  adapter->current_itr = IGBVF_START_ITR;
1549 
1550  /* Set various function pointers */
1551  adapter->ei->init_ops(&adapter->hw);
1552 
1553  rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1554  if (rc)
1555  return rc;
1556 
1557  rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1558  if (rc)
1559  return rc;
1560 
1561  igbvf_set_interrupt_capability(adapter);
1562 
1563  if (igbvf_alloc_queues(adapter))
1564  return -ENOMEM;
1565 
1566  spin_lock_init(&adapter->tx_queue_lock);
1567 
1568  /* Explicitly disable IRQ since the NIC can be in any state. */
1569  igbvf_irq_disable(adapter);
1570 
1571  spin_lock_init(&adapter->stats_lock);
1572 
1573  set_bit(__IGBVF_DOWN, &adapter->state);
1574  return 0;
1575 }
1576 
1577 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1578 {
1579  struct e1000_hw *hw = &adapter->hw;
1580 
1581  adapter->stats.last_gprc = er32(VFGPRC);
1582  adapter->stats.last_gorc = er32(VFGORC);
1583  adapter->stats.last_gptc = er32(VFGPTC);
1584  adapter->stats.last_gotc = er32(VFGOTC);
1585  adapter->stats.last_mprc = er32(VFMPRC);
1586  adapter->stats.last_gotlbc = er32(VFGOTLBC);
1587  adapter->stats.last_gptlbc = er32(VFGPTLBC);
1588  adapter->stats.last_gorlbc = er32(VFGORLBC);
1589  adapter->stats.last_gprlbc = er32(VFGPRLBC);
1590 
1591  adapter->stats.base_gprc = er32(VFGPRC);
1592  adapter->stats.base_gorc = er32(VFGORC);
1593  adapter->stats.base_gptc = er32(VFGPTC);
1594  adapter->stats.base_gotc = er32(VFGOTC);
1595  adapter->stats.base_mprc = er32(VFMPRC);
1596  adapter->stats.base_gotlbc = er32(VFGOTLBC);
1597  adapter->stats.base_gptlbc = er32(VFGPTLBC);
1598  adapter->stats.base_gorlbc = er32(VFGORLBC);
1599  adapter->stats.base_gprlbc = er32(VFGPRLBC);
1600 }
1601 
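1602 /**
1603  * igbvf_open - Called when a network interface is made active
1604  * @netdev: network interface device structure
1605  *
1606  * Returns 0 on success, negative value on failure
1607  *
1608  * The open entry point is called when a network interface is made
1609  * active by the system (IFF_UP).  At this point all resources needed
1610  * for transmit and receive operations are allocated, the interrupt
1611  * handler is registered with the OS, the watchdog timer is started,
1612  * and the stack is notified that the interface is ready.
1613  **/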
1614 static int igbvf_open(struct net_device *netdev)
1615 {
1616  struct igbvf_adapter *adapter = netdev_priv(netdev);
1617  struct e1000_hw *hw = &adapter->hw;
1618  int err;
1619 
1620  /* disallow open during test */
1621  if (test_bit(__IGBVF_TESTING, &adapter->state))
1622  return -EBUSY;
1623 
1624  /* allocate transmit descriptors */
1625  err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1626  if (err)
1627  goto err_setup_tx;
1628 
1629  /* allocate receive descriptors */
1630  err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1631  if (err)
1632  goto err_setup_rx;
1633 
1634  /*
1635  * before we allocate an interrupt, we must be ready to handle it.
1636  * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1637  * as soon as we call pci_request_irq, so we have to setup our
1638  * clean_rx handler before we do so.
1639  */
1640  igbvf_configure(adapter);
1641 
1642  err = igbvf_request_irq(adapter);
1643  if (err)
1644  goto err_req_irq;
1645 
1646  /* From here on the code is the same as igbvf_up() */
1647  clear_bit(__IGBVF_DOWN, &adapter->state);
1648 
1649  napi_enable(&adapter->rx_ring->napi);
1650 
1651  /* clear any pending interrupts */
1652  er32(EICR);
1653 
1654  igbvf_irq_enable(adapter);
1655 
1656  /* start the watchdog */
1657  hw->mac.get_link_status = 1;
1658  mod_timer(&adapter->watchdog_timer, jiffies + 1);
1659 
1660  return 0;
1661 
1662 err_req_irq:
1663  igbvf_free_rx_resources(adapter->rx_ring);
1664 err_setup_rx:
1665  igbvf_free_tx_resources(adapter->tx_ring);
1666 err_setup_tx:
1667  igbvf_reset(adapter);
1668 
1669  return err;
1670 }
1671 
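1672 /**
1673  * igbvf_close - Disables a network interface
1674  * @netdev: network interface device structure
1675  *
1676  * Returns 0, this is not allowed to fail
1677  *
1678  * The close entry point is called when an interface is de-activated
1679  * by the OS.  The hardware is still under the control of the driver,
1680  * but needs to be disabled.  A global MAC reset is issued to stop the
1681  * hardware, and all transmit and receive resources are freed.
1682  **/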
1683 static int igbvf_close(struct net_device *netdev)
1684 {
1685  struct igbvf_adapter *adapter = netdev_priv(netdev);
1686 
1687  WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1688  igbvf_down(adapter);
1689 
1690  igbvf_free_irq(adapter);
1691 
1692  igbvf_free_tx_resources(adapter->tx_ring);
1693  igbvf_free_rx_resources(adapter->rx_ring);
1694 
1695  return 0;
1696 }
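1697 /**
1698  * igbvf_set_mac - Change the Ethernet Address of the NIC
1699  * @netdev: network interface device structure
1700  * @p: pointer to an address structure
1701  *
1702  * Returns 0 on success, negative on failure
1703  **/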
1704 static int igbvf_set_mac(struct net_device *netdev, void *p)
1705 {
1706  struct igbvf_adapter *adapter = netdev_priv(netdev);
1707  struct e1000_hw *hw = &adapter->hw;
1708  struct sockaddr *addr = p;
1709 
1710  if (!is_valid_ether_addr(addr->sa_data))
1711  return -EADDRNOTAVAIL;
1712 
1713  memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1714 
1715  hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1716 
1717  if (memcmp(addr->sa_data, hw->mac.addr, 6))
1718  return -EADDRNOTAVAIL;
1719 
1720  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1721  netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
1722 
1723  return 0;
1724 }
1725 
1726 #define UPDATE_VF_COUNTER(reg, name) \
1727  { \
1728  u32 current_counter = er32(reg); \
1729  if (current_counter < adapter->stats.last_##name) \
1730  adapter->stats.name += 0x100000000LL; \
1731  adapter->stats.last_##name = current_counter; \
1732  adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1733  adapter->stats.name |= current_counter; \
1734  }
1735 
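1736 /**
1737  * igbvf_update_stats - Update the board statistics counters
1738  * @adapter: board private structure
1739  **/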
1740 void igbvf_update_stats(struct igbvf_adapter *adapter)
1741 {
1742  struct e1000_hw *hw = &adapter->hw;
1743  struct pci_dev *pdev = adapter->pdev;
1744 
1745  /*
1746  * Prevent stats update while adapter is being reset, link is down
1747  * or if the pci connection is down.
1748  */
1749  if (adapter->link_speed == 0)
1750  return;
1751 
1752  if (test_bit(__IGBVF_RESETTING, &adapter->state))
1753  return;
1754 
1755  if (pci_channel_offline(pdev))
1756  return;
1757 
1758  UPDATE_VF_COUNTER(VFGPRC, gprc);
1759  UPDATE_VF_COUNTER(VFGORC, gorc);
1760  UPDATE_VF_COUNTER(VFGPTC, gptc);
1761  UPDATE_VF_COUNTER(VFGOTC, gotc);
1762  UPDATE_VF_COUNTER(VFMPRC, mprc);
1763  UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1764  UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1765  UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1766  UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1767 
1768  /* Fill out the OS statistics structure */
1769  adapter->net_stats.multicast = adapter->stats.mprc;
1770 }
1771 
1772 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1773 {
1774  dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1775  adapter->link_speed,
1776  adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
1777 }
1778 
1779 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1780 {
1781  struct e1000_hw *hw = &adapter->hw;
1782  s32 ret_val = E1000_SUCCESS;
1783  bool link_active;
1784 
1785  /* If interface is down, stay link down */
1786  if (test_bit(__IGBVF_DOWN, &adapter->state))
1787  return false;
1788 
1789  ret_val = hw->mac.ops.check_for_link(hw);
1790  link_active = !hw->mac.get_link_status;
1791 
1792  /* if check for link returns error we will need to reset */
1793  if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1794  schedule_work(&adapter->reset_task);
1795 
1796  return link_active;
1797 }
1798 
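1799 /**
1800  * igbvf_watchdog - Timer Call-back
1801  * @data: pointer to adapter cast into an unsigned long
1802  **/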
1803 static void igbvf_watchdog(unsigned long data)
1804 {
1805  struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1806 
1807  /* Do the rest outside of interrupt context */
1808  schedule_work(&adapter->watchdog_task);
1809 }
1810 
1811 static void igbvf_watchdog_task(struct work_struct *work)
1812 {
1813  struct igbvf_adapter *adapter = container_of(work,
1814  struct igbvf_adapter,
1815  watchdog_task);
1816  struct net_device *netdev = adapter->netdev;
1817  struct e1000_mac_info *mac = &adapter->hw.mac;
1818  struct igbvf_ring *tx_ring = adapter->tx_ring;
1819  struct e1000_hw *hw = &adapter->hw;
1820  u32 link;
1821  int tx_pending = 0;
1822 
1823  link = igbvf_has_link(adapter);
1824 
1825  if (link) {
1826  if (!netif_carrier_ok(netdev)) {
1827  mac->ops.get_link_up_info(&adapter->hw,
1828  &adapter->link_speed,
1829  &adapter->link_duplex);
1830  igbvf_print_link_info(adapter);
1831 
1832  netif_carrier_on(netdev);
1833  netif_wake_queue(netdev);
1834  }
1835  } else {
1836  if (netif_carrier_ok(netdev)) {
1837  adapter->link_speed = 0;
1838  adapter->link_duplex = 0;
1839  dev_info(&adapter->pdev->dev, "Link is Down\n");
1840  netif_carrier_off(netdev);
1841  netif_stop_queue(netdev);
1842  }
1843  }
1844 
1845  if (netif_carrier_ok(netdev)) {
1846  igbvf_update_stats(adapter);
1847  } else {
1848  tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1849  tx_ring->count);
1850  if (tx_pending) {
1851  /*
1852  * We've lost link, so the controller stops DMA,
1853  * but we've got queued Tx work that's never going
1854  * to get done, so reset controller to flush Tx.
1855  * (Do the reset outside of interrupt context).
1856  */
1857  adapter->tx_timeout_count++;
1858  schedule_work(&adapter->reset_task);
1859  }
1860  }
1861 
1862  /* Cause software interrupt to ensure Rx ring is cleaned */
1863  ew32(EICS, adapter->rx_ring->eims_value);
1864 
1865  /* Reset the timer */
1866  if (!test_bit(__IGBVF_DOWN, &adapter->state))
1867  mod_timer(&adapter->watchdog_timer,
1868  round_jiffies(jiffies + (2 * HZ)));
1869 }
1870 
1871 #define IGBVF_TX_FLAGS_CSUM 0x00000001
1872 #define IGBVF_TX_FLAGS_VLAN 0x00000002
1873 #define IGBVF_TX_FLAGS_TSO 0x00000004
1874 #define IGBVF_TX_FLAGS_IPV4 0x00000008
1875 #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1876 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1877 
1878 static int igbvf_tso(struct igbvf_adapter *adapter,
1879  struct igbvf_ring *tx_ring,
1880  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1881 {
1882  struct e1000_adv_tx_context_desc *context_desc;
1883  unsigned int i;
1884  int err;
1885  struct igbvf_buffer *buffer_info;
1886  u32 info = 0, tu_cmd = 0;
1887  u32 mss_l4len_idx, l4len;
1888  *hdr_len = 0;
1889 
1890  if (skb_header_cloned(skb)) {
1891  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1892  if (err) {
1893  dev_err(&adapter->pdev->dev,
1894  "igbvf_tso returning an error\n");
1895  return err;
1896  }
1897  }
1898 
1899  l4len = tcp_hdrlen(skb);
1900  *hdr_len += l4len;
1901 
1902  if (skb->protocol == htons(ETH_P_IP)) {
1903  struct iphdr *iph = ip_hdr(skb);
1904  iph->tot_len = 0;
1905  iph->check = 0;
1906  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1907  iph->daddr, 0,
1908  IPPROTO_TCP,
1909  0);
1910  } else if (skb_is_gso_v6(skb)) {
1911  ipv6_hdr(skb)->payload_len = 0;
1912  tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1913  &ipv6_hdr(skb)->daddr,
1914  0, IPPROTO_TCP, 0);
1915  }
1916 
1917  i = tx_ring->next_to_use;
1918 
1919  buffer_info = &tx_ring->buffer_info[i];
1920  context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1921  /* VLAN MACLEN IPLEN */
1922  if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1923  info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1924  info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1925  *hdr_len += skb_network_offset(skb);
1926  info |= (skb_transport_header(skb) - skb_network_header(skb));
1927  *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
1928  context_desc->vlan_macip_lens = cpu_to_le32(info);
1929 
1930  /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1931  tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1932 
1933  if (skb->protocol == htons(ETH_P_IP))
1934  tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1935  tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1936 
1937  context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1938 
1939  /* MSS L4LEN IDX */
1940  mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
1941  mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
1942 
1943  context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1944  context_desc->seqnum_seed = 0;
1945 
1946  buffer_info->time_stamp = jiffies;
1947  buffer_info->next_to_watch = i;
1948  buffer_info->dma = 0;
1949  i++;
1950  if (i == tx_ring->count)
1951  i = 0;
1952 
1953  tx_ring->next_to_use = i;
1954 
1955  return true;
1956 }
1957 
1958 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1959  struct igbvf_ring *tx_ring,
1960  struct sk_buff *skb, u32 tx_flags)
1961 {
1962  struct e1000_adv_tx_context_desc *context_desc;
1963  unsigned int i;
1964  struct igbvf_buffer *buffer_info;
1965  u32 info = 0, tu_cmd = 0;
1966 
1967  if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
1968  (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
1969  i = tx_ring->next_to_use;
1970  buffer_info = &tx_ring->buffer_info[i];
1971  context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1972 
1973  if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1974  info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1975 
1976  info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1977  if (skb->ip_summed == CHECKSUM_PARTIAL)
1978  info |= (skb_transport_header(skb) -
1979  skb_network_header(skb));
1980 
1981 
1982  context_desc->vlan_macip_lens = cpu_to_le32(info);
1983 
1984  tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1985 
1986  if (skb->ip_summed == CHECKSUM_PARTIAL) {
1987  switch (skb->protocol) {
1988  case __constant_htons(ETH_P_IP):
1989  tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1990  if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1991  tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1992  break;
1993  case __constant_htons(ETH_P_IPV6):
1994  if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1995  tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1996  break;
1997  default:
1998  break;
1999  }
2000  }
2001 
2002  context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2003  context_desc->seqnum_seed = 0;
2004  context_desc->mss_l4len_idx = 0;
2005 
2006  buffer_info->time_stamp = jiffies;
2007  buffer_info->next_to_watch = i;
2008  buffer_info->dma = 0;
2009  i++;
2010  if (i == tx_ring->count)
2011  i = 0;
2012  tx_ring->next_to_use = i;
2013 
2014  return true;
2015  }
2016 
2017  return false;
2018 }
2019 
2020 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2021 {
2022  struct igbvf_adapter *adapter = netdev_priv(netdev);
2023 
2024  /* if there are enough free descriptors then we don't need to worry */
2025  if (igbvf_desc_unused(adapter->tx_ring) >= size)
2026  return 0;
2027 
2028  netif_stop_queue(netdev);
2029 
2030  smp_mb();
2031 
2032  /* We need to check again just in case room has been made available */
2033  if (igbvf_desc_unused(adapter->tx_ring) < size)
2034  return -EBUSY;
2035 
2036  netif_wake_queue(netdev);
2037 
2038  ++adapter->restart_queue;
2039  return 0;
2040 }
2041 
2042 #define IGBVF_MAX_TXD_PWR 16
2043 #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2044 
2045 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2046  struct igbvf_ring *tx_ring,
2047  struct sk_buff *skb,
2048  unsigned int first)
2049 {
2050  struct igbvf_buffer *buffer_info;
2051  struct pci_dev *pdev = adapter->pdev;
2052  unsigned int len = skb_headlen(skb);
2053  unsigned int count = 0, i;
2054  unsigned int f;
2055 
2056  i = tx_ring->next_to_use;
2057 
2058  buffer_info = &tx_ring->buffer_info[i];
2059  BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2060  buffer_info->length = len;
2061  /* set time_stamp *before* dma to help avoid a possible race */
2062  buffer_info->time_stamp = jiffies;
2063  buffer_info->next_to_watch = i;
2064  buffer_info->mapped_as_page = false;
2065  buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2066  DMA_TO_DEVICE);
2067  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2068  goto dma_error;
2069 
2070 
2071  for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2072  const struct skb_frag_struct *frag;
2073 
2074  count++;
2075  i++;
2076  if (i == tx_ring->count)
2077  i = 0;
2078 
2079  frag = &skb_shinfo(skb)->frags[f];
2080  len = skb_frag_size(frag);
2081 
2082  buffer_info = &tx_ring->buffer_info[i];
2083  BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2084  buffer_info->length = len;
2085  buffer_info->time_stamp = jiffies;
2086  buffer_info->next_to_watch = i;
2087  buffer_info->mapped_as_page = true;
2088  buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2089  DMA_TO_DEVICE);
2090  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2091  goto dma_error;
2092  }
2093 
2094  tx_ring->buffer_info[i].skb = skb;
2095  tx_ring->buffer_info[first].next_to_watch = i;
2096 
2097  return ++count;
2098 
2099 dma_error:
2100  dev_err(&pdev->dev, "TX DMA map failed\n");
2101 
2102  /* clear timestamp and dma mappings for failed buffer_info mapping */
2103  buffer_info->dma = 0;
2104  buffer_info->time_stamp = 0;
2105  buffer_info->length = 0;
2106  buffer_info->next_to_watch = 0;
2107  buffer_info->mapped_as_page = false;
2108  if (count)
2109  count--;
2110 
2111  /* clear timestamp and dma mappings for remaining portion of packet */
2112  while (count--) {
2113  if (i==0)
2114  i += tx_ring->count;
2115  i--;
2116  buffer_info = &tx_ring->buffer_info[i];
2117  igbvf_put_txbuf(adapter, buffer_info);
2118  }
2119 
2120  return 0;
2121 }
2122 
2123 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2124  struct igbvf_ring *tx_ring,
2125  int tx_flags, int count, u32 paylen,
2126  u8 hdr_len)
2127 {
2128  union e1000_adv_tx_desc *tx_desc = NULL;
2129  struct igbvf_buffer *buffer_info;
2130  u32 olinfo_status = 0, cmd_type_len;
2131  unsigned int i;
2132 
2133  cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2134                  E1000_ADVTXD_DCMD_DEXT);
2135 
2136  if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2137  cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2138 
2139  if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2140  cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2141 
2142  /* insert tcp checksum */
2143  olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2144 
2145  /* insert ip checksum */
2146  if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2147  olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2148 
2149  } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2150  olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2151  }
2152 
2153  olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2154 
2155  i = tx_ring->next_to_use;
2156  while (count--) {
2157  buffer_info = &tx_ring->buffer_info[i];
2158  tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2159  tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2160  tx_desc->read.cmd_type_len =
2161  cpu_to_le32(cmd_type_len | buffer_info->length);
2162  tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2163  i++;
2164  if (i == tx_ring->count)
2165  i = 0;
2166  }
2167 
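 	/* only the packet's final descriptor gets the closing command bits in
 	 * adapter->txd_cmd (end-of-packet and, presumably, report-status),
 	 * which are programmed when the Tx ring is configured */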
2168  tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2169  /* Force memory writes to complete before letting h/w
2170  * know there are new descriptors to fetch. (Only
2171  * applicable for weak-ordered memory model archs,
2172  * such as IA-64). */
2173  wmb();
2174 
2175  tx_ring->next_to_use = i;
2176  writel(i, adapter->hw.hw_addr + tx_ring->tail);
2177  /* we need this if more than one processor can write to our tail
2178  * at a time; it synchronizes IO on IA64/Altix systems */
2179  mmiowb();
2180 }
2181 
2182 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2183  struct net_device *netdev,
2184  struct igbvf_ring *tx_ring)
2185 {
2186  struct igbvf_adapter *adapter = netdev_priv(netdev);
2187  unsigned int first, tx_flags = 0;
2188  u8 hdr_len = 0;
2189  int count = 0;
2190  int tso = 0;
2191 
2192  if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2193  dev_kfree_skb_any(skb);
2194  return NETDEV_TX_OK;
2195  }
2196 
2197  if (skb->len <= 0) {
2198  dev_kfree_skb_any(skb);
2199  return NETDEV_TX_OK;
2200  }
2201 
2202  /*
2203   * need: count + 4 descriptors:
2204   *       + 2 desc gap to keep tail from touching head,
2205   *       + 1 desc for skb->data,
2206   *       + 1 desc for the context descriptor;
2207   * otherwise stop the queue and try again next time
2208   */
2209  if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2210  /* this is a hard error */
2211  return NETDEV_TX_BUSY;
2212  }
2213 
2214  if (vlan_tx_tag_present(skb)) {
2215  tx_flags |= IGBVF_TX_FLAGS_VLAN;
2216  tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2217  }
2218 
2219  if (skb->protocol == htons(ETH_P_IP))
2220  tx_flags |= IGBVF_TX_FLAGS_IPV4;
2221 
2222  first = tx_ring->next_to_use;
2223 
2224  tso = skb_is_gso(skb) ?
2225  igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2226  if (unlikely(tso < 0)) {
2227  dev_kfree_skb_any(skb);
2228  return NETDEV_TX_OK;
2229  }
2230 
2231  if (tso)
2232  tx_flags |= IGBVF_TX_FLAGS_TSO;
2233  else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2234  (skb->ip_summed == CHECKSUM_PARTIAL))
2235  tx_flags |= IGBVF_TX_FLAGS_CSUM;
2236 
2237  /*
2238  * count reflects descriptors mapped, if 0 then mapping error
2239  * has occurred and we need to rewind the descriptor queue
2240  */
2241  count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2242 
2243  if (count) {
2244  igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2245  skb->len, hdr_len);
2246  /* Make sure there is space in the ring for the next send. */
2247  igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2248  } else {
2249  dev_kfree_skb_any(skb);
2250  tx_ring->buffer_info[first].time_stamp = 0;
2251  tx_ring->next_to_use = first;
2252  }
2253 
2254  return NETDEV_TX_OK;
2255 }
2256 
2257 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2258  struct net_device *netdev)
2259 {
2260  struct igbvf_adapter *adapter = netdev_priv(netdev);
2261  struct igbvf_ring *tx_ring;
2262 
2263  if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2264  dev_kfree_skb_any(skb);
2265  return NETDEV_TX_OK;
2266  }
2267 
2268  tx_ring = &adapter->tx_ring[0];
2269 
2270  return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2271 }
2272 
2277 static void igbvf_tx_timeout(struct net_device *netdev)
2278 {
2279  struct igbvf_adapter *adapter = netdev_priv(netdev);
2280 
2281  /* Do the reset outside of interrupt context */
2282  adapter->tx_timeout_count++;
2283  schedule_work(&adapter->reset_task);
2284 }
2285 
2286 static void igbvf_reset_task(struct work_struct *work)
2287 {
2288  struct igbvf_adapter *adapter;
2289  adapter = container_of(work, struct igbvf_adapter, reset_task);
2290 
2291  igbvf_reinit_locked(adapter);
2292 }
2293 
2301 static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2302 {
2303  struct igbvf_adapter *adapter = netdev_priv(netdev);
2304 
2305  /* only return the current stats */
2306  return &adapter->net_stats;
2307 }
2308 
2316 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2317 {
2318  struct igbvf_adapter *adapter = netdev_priv(netdev);
2319  int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2320 
2321  if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2322  dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2323  return -EINVAL;
2324  }
2325 
2326 #define MAX_STD_JUMBO_FRAME_SIZE 9234
2327  if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2328  dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2329  return -EINVAL;
2330  }
2331 
2332  while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2333  msleep(1);
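 	/* the __IGBVF_RESETTING bit serializes this MTU change against any
 	 * other reset already in flight; sleep until we own it */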
2334  /* igbvf_down has a dependency on max_frame_size */
2335  adapter->max_frame_size = max_frame;
2336  if (netif_running(netdev))
2337  igbvf_down(adapter);
2338 
2339  /*
2340  * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2341  * means we reserve 2 more, this pushes us to allocate from the next
2342  * larger slab size.
2343  * i.e. RXBUFFER_2048 --> size-4096 slab
2344  * However with the new *_jumbo_rx* routines, jumbo receives will use
2345  * fragmented skbs
2346  */
2347 
2348  if (max_frame <= 1024)
2349  adapter->rx_buffer_len = 1024;
2350  else if (max_frame <= 2048)
2351  adapter->rx_buffer_len = 2048;
2352  else
2353 #if (PAGE_SIZE / 2) > 16384
2354  adapter->rx_buffer_len = 16384;
2355 #else
2356  adapter->rx_buffer_len = PAGE_SIZE / 2;
2357 #endif
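 	/* frames larger than 2KB fall back to half-page receive buffers; as
 	 * the note above says, jumbo receives are then built from fragmented
 	 * skbs instead of one large contiguous allocation */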
2358 
2359 
2360  /* adjust allocation if LPE protects us, and we aren't using SBP */
2361  if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2362  (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2363  adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2364  ETH_FCS_LEN;
2365 
2366  dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2367  netdev->mtu, new_mtu);
2368  netdev->mtu = new_mtu;
2369 
2370  if (netif_running(netdev))
2371  igbvf_up(adapter);
2372  else
2373  igbvf_reset(adapter);
2374 
2375  clear_bit(__IGBVF_RESETTING, &adapter->state);
2376 
2377  return 0;
2378 }
2379 
2380 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2381 {
2382  switch (cmd) {
2383  default:
2384  return -EOPNOTSUPP;
2385  }
2386 }
2387 
2388 static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2389 {
2390  struct net_device *netdev = pci_get_drvdata(pdev);
2391  struct igbvf_adapter *adapter = netdev_priv(netdev);
2392 #ifdef CONFIG_PM
2393  int retval = 0;
2394 #endif
2395 
2396  netif_device_detach(netdev);
2397 
2398  if (netif_running(netdev)) {
2399  WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2400  igbvf_down(adapter);
2401  igbvf_free_irq(adapter);
2402  }
2403 
2404 #ifdef CONFIG_PM
2405  retval = pci_save_state(pdev);
2406  if (retval)
2407  return retval;
2408 #endif
2409 
2410  pci_disable_device(pdev);
2411 
2412  return 0;
2413 }
2414 
2415 #ifdef CONFIG_PM
2416 static int igbvf_resume(struct pci_dev *pdev)
2417 {
2418  struct net_device *netdev = pci_get_drvdata(pdev);
2419  struct igbvf_adapter *adapter = netdev_priv(netdev);
2420  u32 err;
2421 
2422  pci_restore_state(pdev);
2423  err = pci_enable_device_mem(pdev);
2424  if (err) {
2425  dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2426  return err;
2427  }
2428 
2429  pci_set_master(pdev);
2430 
2431  if (netif_running(netdev)) {
2432  err = igbvf_request_irq(adapter);
2433  if (err)
2434  return err;
2435  }
2436 
2437  igbvf_reset(adapter);
2438 
2439  if (netif_running(netdev))
2440  igbvf_up(adapter);
2441 
2442  netif_device_attach(netdev);
2443 
2444  return 0;
2445 }
2446 #endif
2447 
2448 static void igbvf_shutdown(struct pci_dev *pdev)
2449 {
2450  igbvf_suspend(pdev, PMSG_SUSPEND);
2451 }
2452 
2453 #ifdef CONFIG_NET_POLL_CONTROLLER
2454 /*
2455  * Polling 'interrupt' - used by things like netconsole to send skbs
2456  * without having to re-enable interrupts. It's not called while
2457  * the interrupt routine is executing.
2458  */
2459 static void igbvf_netpoll(struct net_device *netdev)
2460 {
2461  struct igbvf_adapter *adapter = netdev_priv(netdev);
2462 
2463  disable_irq(adapter->pdev->irq);
2464 
2465  igbvf_clean_tx_irq(adapter->tx_ring);
2466 
2467  enable_irq(adapter->pdev->irq);
2468 }
2469 #endif
2470 
2479 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2480  pci_channel_state_t state)
2481 {
2482  struct net_device *netdev = pci_get_drvdata(pdev);
2483  struct igbvf_adapter *adapter = netdev_priv(netdev);
2484 
2485  netif_device_detach(netdev);
2486 
2487  if (state == pci_channel_io_perm_failure)
2488  return PCI_ERS_RESULT_DISCONNECT;
2489 
2490  if (netif_running(netdev))
2491  igbvf_down(adapter);
2492  pci_disable_device(pdev);
2493 
2494  /* Request a slot reset. */
2495  return PCI_ERS_RESULT_NEED_RESET;
2496 }
2497 
2505 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2506 {
2507  struct net_device *netdev = pci_get_drvdata(pdev);
2508  struct igbvf_adapter *adapter = netdev_priv(netdev);
2509 
2510  if (pci_enable_device_mem(pdev)) {
2511  dev_err(&pdev->dev,
2512  "Cannot re-enable PCI device after reset.\n");
2513  return PCI_ERS_RESULT_DISCONNECT;
2514  }
2515  pci_set_master(pdev);
2516 
2517  igbvf_reset(adapter);
2518 
2519  return PCI_ERS_RESULT_RECOVERED;
2520 }
2521 
2530 static void igbvf_io_resume(struct pci_dev *pdev)
2531 {
2532  struct net_device *netdev = pci_get_drvdata(pdev);
2533  struct igbvf_adapter *adapter = netdev_priv(netdev);
2534 
2535  if (netif_running(netdev)) {
2536  if (igbvf_up(adapter)) {
2537  dev_err(&pdev->dev,
2538  "can't bring device back up after reset\n");
2539  return;
2540  }
2541  }
2542 
2543  netif_device_attach(netdev);
2544 }
2545 
2546 static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2547 {
2548  struct e1000_hw *hw = &adapter->hw;
2549  struct net_device *netdev = adapter->netdev;
2550  struct pci_dev *pdev = adapter->pdev;
2551 
2552  if (hw->mac.type == e1000_vfadapt_i350)
2553  dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2554  else
2555  dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2556  dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2557 }
2558 
2559 static int igbvf_set_features(struct net_device *netdev,
2560  netdev_features_t features)
2561 {
2562  struct igbvf_adapter *adapter = netdev_priv(netdev);
2563 
2564  if (features & NETIF_F_RXCSUM)
2565  adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2566  else
2567  adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2568 
2569  return 0;
2570 }
2571 
2572 static const struct net_device_ops igbvf_netdev_ops = {
2573  .ndo_open = igbvf_open,
2574  .ndo_stop = igbvf_close,
2575  .ndo_start_xmit = igbvf_xmit_frame,
2576  .ndo_get_stats = igbvf_get_stats,
2577  .ndo_set_rx_mode = igbvf_set_multi,
2578  .ndo_set_mac_address = igbvf_set_mac,
2579  .ndo_change_mtu = igbvf_change_mtu,
2580  .ndo_do_ioctl = igbvf_ioctl,
2581  .ndo_tx_timeout = igbvf_tx_timeout,
2582  .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2583  .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2584 #ifdef CONFIG_NET_POLL_CONTROLLER
2585  .ndo_poll_controller = igbvf_netpoll,
2586 #endif
2587  .ndo_set_features = igbvf_set_features,
2588 };
2589 
2601 static int __devinit igbvf_probe(struct pci_dev *pdev,
2602  const struct pci_device_id *ent)
2603 {
2604  struct net_device *netdev;
2605  struct igbvf_adapter *adapter;
2606  struct e1000_hw *hw;
2607  const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2608 
2609  static int cards_found;
2610  int err, pci_using_dac;
2611 
2612  err = pci_enable_device_mem(pdev);
2613  if (err)
2614  return err;
2615 
2616  pci_using_dac = 0;
2617  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2618  if (!err) {
2619  err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2620  if (!err)
2621  pci_using_dac = 1;
2622  } else {
2623  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2624  if (err) {
2625  err = dma_set_coherent_mask(&pdev->dev,
2626  DMA_BIT_MASK(32));
2627  if (err) {
2628  dev_err(&pdev->dev, "No usable DMA "
2629  "configuration, aborting\n");
2630  goto err_dma;
2631  }
2632  }
2633  }
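 	/* prefer 64-bit DMA and fall back to a 32-bit mask if the platform
 	 * cannot provide it; pci_using_dac records the outcome so that
 	 * NETIF_F_HIGHDMA is only advertised when 64-bit addressing worked */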
2634 
2635  err = pci_request_regions(pdev, igbvf_driver_name);
2636  if (err)
2637  goto err_pci_reg;
2638 
2639  pci_set_master(pdev);
2640 
2641  err = -ENOMEM;
2642  netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2643  if (!netdev)
2644  goto err_alloc_etherdev;
2645 
2646  SET_NETDEV_DEV(netdev, &pdev->dev);
2647 
2648  pci_set_drvdata(pdev, netdev);
2649  adapter = netdev_priv(netdev);
2650  hw = &adapter->hw;
2651  adapter->netdev = netdev;
2652  adapter->pdev = pdev;
2653  adapter->ei = ei;
2654  adapter->pba = ei->pba;
2655  adapter->flags = ei->flags;
2656  adapter->hw.back = adapter;
2657  adapter->hw.mac.type = ei->mac;
2658  adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2659 
2660  /* PCI config space info */
2661 
2662  hw->vendor_id = pdev->vendor;
2663  hw->device_id = pdev->device;
2664  hw->subsystem_vendor_id = pdev->subsystem_vendor;
2665  hw->subsystem_device_id = pdev->subsystem_device;
2666  hw->revision_id = pdev->revision;
2667 
2668  err = -EIO;
2669  adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2670  pci_resource_len(pdev, 0));
2671 
2672  if (!adapter->hw.hw_addr)
2673  goto err_ioremap;
2674 
2675  if (ei->get_variants) {
2676  err = ei->get_variants(adapter);
2677  if (err)
2678  goto err_ioremap;
2679  }
2680 
2681  /* setup adapter struct */
2682  err = igbvf_sw_init(adapter);
2683  if (err)
2684  goto err_sw_init;
2685 
2686  /* construct the net_device struct */
2687  netdev->netdev_ops = &igbvf_netdev_ops;
2688 
2689  igbvf_set_ethtool_ops(netdev);
2690  netdev->watchdog_timeo = 5 * HZ;
2691  strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2692 
2693  adapter->bd_number = cards_found++;
2694 
2695  netdev->hw_features = NETIF_F_SG |
2696  NETIF_F_IP_CSUM |
2697  NETIF_F_IPV6_CSUM |
2698  NETIF_F_TSO |
2699  NETIF_F_TSO6 |
2700  NETIF_F_RXCSUM;
2701 
2702  netdev->features = netdev->hw_features |
2703  NETIF_F_HW_VLAN_TX |
2704  NETIF_F_HW_VLAN_RX |
2705  NETIF_F_HW_VLAN_FILTER;
2706 
2707  if (pci_using_dac)
2708  netdev->features |= NETIF_F_HIGHDMA;
2709 
2710  netdev->vlan_features |= NETIF_F_TSO;
2711  netdev->vlan_features |= NETIF_F_TSO6;
2712  netdev->vlan_features |= NETIF_F_IP_CSUM;
2713  netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2714  netdev->vlan_features |= NETIF_F_SG;
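 	/* vlan_features are the offloads that stacked VLAN devices are
 	 * allowed to inherit from this interface */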
2715 
2716  /* reset the controller to put the device in a known good state */
2717  err = hw->mac.ops.reset_hw(hw);
2718  if (err) {
2719  dev_info(&pdev->dev,
2720  "PF still in reset state, assigning new address."
2721  " Is the PF interface up?\n");
2722  eth_hw_addr_random(netdev);
2723  memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2724  netdev->addr_len);
2725  } else {
2726  err = hw->mac.ops.read_mac_addr(hw);
2727  if (err) {
2728  dev_err(&pdev->dev, "Error reading MAC address\n");
2729  goto err_hw_init;
2730  }
2731  memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2732  netdev->addr_len);
2733  }
2734 
2735  if (!is_valid_ether_addr(netdev->dev_addr)) {
2736  dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2737  netdev->dev_addr);
2738  err = -EIO;
2739  goto err_hw_init;
2740  }
2741 
2742  memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2743 
2744  setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2745  (unsigned long) adapter);
2746 
2747  INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2748  INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2749 
2750  /* ring size defaults */
2751  adapter->rx_ring->count = 1024;
2752  adapter->tx_ring->count = 1024;
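 	/* 1024 descriptors per ring by default; these can presumably be
 	 * resized through the driver's ethtool ring parameters within the
 	 * limits it enforces */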
2753 
2754  /* reset the hardware with the new settings */
2755  igbvf_reset(adapter);
2756 
2757  strcpy(netdev->name, "eth%d");
2758  err = register_netdev(netdev);
2759  if (err)
2760  goto err_hw_init;
2761 
2762  /* tell the stack to leave us alone until igbvf_open() is called */
2763  netif_carrier_off(netdev);
2764  netif_stop_queue(netdev);
2765 
2766  igbvf_print_device_info(adapter);
2767 
2768  igbvf_initialize_last_counter_stats(adapter);
2769 
2770  return 0;
2771 
2772 err_hw_init:
2773  kfree(adapter->tx_ring);
2774  kfree(adapter->rx_ring);
2775 err_sw_init:
2776  igbvf_reset_interrupt_capability(adapter);
2777  iounmap(adapter->hw.hw_addr);
2778 err_ioremap:
2779  free_netdev(netdev);
2780 err_alloc_etherdev:
2781  pci_release_regions(pdev);
2782 err_pci_reg:
2783 err_dma:
2784  pci_disable_device(pdev);
2785  return err;
2786 }
2787 
2797 static void __devexit igbvf_remove(struct pci_dev *pdev)
2798 {
2799  struct net_device *netdev = pci_get_drvdata(pdev);
2800  struct igbvf_adapter *adapter = netdev_priv(netdev);
2801  struct e1000_hw *hw = &adapter->hw;
2802 
2803  /*
2804  * The watchdog timer may be rescheduled, so explicitly
2805  * disable it from being rescheduled.
2806  */
2807  set_bit(__IGBVF_DOWN, &adapter->state);
2808  del_timer_sync(&adapter->watchdog_timer);
2809 
2810  cancel_work_sync(&adapter->reset_task);
2811  cancel_work_sync(&adapter->watchdog_task);
2812 
2813  unregister_netdev(netdev);
2814 
2815  igbvf_reset_interrupt_capability(adapter);
2816 
2817  /*
2818  * it is important to delete the napi struct prior to freeing the
2819  * rx ring so that you do not end up with null pointer refs
2820  */
2821  netif_napi_del(&adapter->rx_ring->napi);
2822  kfree(adapter->tx_ring);
2823  kfree(adapter->rx_ring);
2824 
2825  iounmap(hw->hw_addr);
2826  if (hw->flash_address)
2827  iounmap(hw->flash_address);
2828  pci_release_regions(pdev);
2829 
2830  free_netdev(netdev);
2831 
2832  pci_disable_device(pdev);
2833 }
2834 
2835 /* PCI Error Recovery (ERS) */
2836 static const struct pci_error_handlers igbvf_err_handler = {
2837  .error_detected = igbvf_io_error_detected,
2838  .slot_reset = igbvf_io_slot_reset,
2839  .resume = igbvf_io_resume,
2840 };
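 /* AER recovery sequence: error_detected() quiesces the device, slot_reset()
  * re-enables and resets it once the PCI core has reset the slot, and
  * resume() brings the interface back up and re-attaches it to the stack. */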
2841 
2842 static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2843  { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2844  { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2845  { } /* terminate list */
2846 };
2847 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
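 /* exporting the ID table lets userspace (udev/modprobe) autoload this
  * module when a matching virtual function appears on the PCI bus */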
2848 
2849 /* PCI Device API Driver */
2850 static struct pci_driver igbvf_driver = {
2851  .name = igbvf_driver_name,
2852  .id_table = igbvf_pci_tbl,
2853  .probe = igbvf_probe,
2854  .remove = __devexit_p(igbvf_remove),
2855 #ifdef CONFIG_PM
2856  /* Power Management Hooks */
2857  .suspend = igbvf_suspend,
2858  .resume = igbvf_resume,
2859 #endif
2860  .shutdown = igbvf_shutdown,
2861  .err_handler = &igbvf_err_handler
2862 };
2863 
2870 static int __init igbvf_init_module(void)
2871 {
2872  int ret;
2873  pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
2874  pr_info("%s\n", igbvf_copyright);
2875 
2876  ret = pci_register_driver(&igbvf_driver);
2877 
2878  return ret;
2879 }
2880 module_init(igbvf_init_module);
2881 
2888 static void __exit igbvf_exit_module(void)
2889 {
2890  pci_unregister_driver(&igbvf_driver);
2891 }
2892 module_exit(igbvf_exit_module);
2893 
2894 
2895 MODULE_AUTHOR("Intel Corporation, <[email protected]>");
2896 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2897 MODULE_LICENSE("GPL");
2898 MODULE_VERSION(DRV_VERSION);
2899 
2900 /* netdev.c */