Linux Kernel  3.7.1
ixgbevf_main.c
1 /*******************************************************************************
2 
3  Intel 82599 Virtual Function driver
4  Copyright(c) 1999 - 2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 
29 /******************************************************************************
30  Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 
35 #include <linux/types.h>
36 #include <linux/bitops.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h>
41 #include <linux/string.h>
42 #include <linux/in.h>
43 #include <linux/ip.h>
44 #include <linux/tcp.h>
45 #include <linux/sctp.h>
46 #include <linux/ipv6.h>
47 #include <linux/slab.h>
48 #include <net/checksum.h>
49 #include <net/ip6_checksum.h>
50 #include <linux/ethtool.h>
51 #include <linux/if.h>
52 #include <linux/if_vlan.h>
53 #include <linux/prefetch.h>
54 
55 #include "ixgbevf.h"
56 
57 const char ixgbevf_driver_name[] = "ixgbevf";
58 static const char ixgbevf_driver_string[] =
59  "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60 
61 #define DRV_VERSION "2.6.0-k"
62 const char ixgbevf_driver_version[] = DRV_VERSION;
63 static char ixgbevf_copyright[] =
64  "Copyright (c) 2009 - 2012 Intel Corporation.";
65 
66 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
67  [board_82599_vf] = &ixgbevf_82599_vf_info,
68  [board_X540_vf] = &ixgbevf_X540_vf_info,
69 };
70 
71 /* ixgbevf_pci_tbl - PCI Device ID Table
72  *
73  * Wildcard entries (PCI_ANY_ID) should come last
74  * Last entry must be all 0s
75  *
76  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77  * Class, Class Mask, private data (not used) }
78  */
79 static struct pci_device_id ixgbevf_pci_tbl[] = {
80  {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
81  board_82599_vf},
82  {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
83  board_X540_vf},
84 
85  /* required last entry */
86  {0, }
87 };
88 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
89 
90 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
91 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_VERSION);
94 
95 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
96 static int debug = -1;
97 module_param(debug, int, 0);
98 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
99 
100 /* forward decls */
101 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
102 
103 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
104  struct ixgbevf_ring *rx_ring,
105  u32 val)
106 {
107  /*
108  * Force memory writes to complete before letting h/w
109  * know there are new descriptors to fetch. (Only
110  * applicable for weak-ordered memory model archs,
111  * such as IA-64).
112  */
113  wmb();
114  IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
115 }
116 
125 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
126  u8 queue, u8 msix_vector)
127 {
128  u32 ivar, index;
129  struct ixgbe_hw *hw = &adapter->hw;
130  if (direction == -1) {
131  /* other causes */
132  msix_vector |= IXGBE_IVAR_ALLOC_VAL;
133  ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
134  ivar &= ~0xFF;
135  ivar |= msix_vector;
136  IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
137  } else {
138  /* tx or rx causes */
139  msix_vector |= IXGBE_IVAR_ALLOC_VAL;
140  index = ((16 * (queue & 1)) + (8 * direction));
141  ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
142  ivar &= ~(0xFF << index);
143  ivar |= (msix_vector << index);
144  IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
145  }
146 }
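/* Worked example (editor's illustration, not from the driver itself) of the
 * IVAR layout handled above.  Each VTIVAR register holds the vector
 * assignments for two queues, one byte per cause:
 *
 *   queue = 3, direction = 0 (Rx), msix_vector = 2
 *     index = (16 * (3 & 1)) + (8 * 0) = 16
 *     -> VTIVAR(3 >> 1) = VTIVAR(1), bits 23:16 = 2 | IXGBE_IVAR_ALLOC_VAL
 *
 *   queue = 3, direction = 1 (Tx), msix_vector = 2
 *     index = (16 * (3 & 1)) + (8 * 1) = 24
 *     -> VTIVAR(1), bits 31:24 = 2 | IXGBE_IVAR_ALLOC_VAL
 */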
147 
148 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
149  struct ixgbevf_tx_buffer
150  *tx_buffer_info)
151 {
152  if (tx_buffer_info->dma) {
153  if (tx_buffer_info->mapped_as_page)
154  dma_unmap_page(tx_ring->dev,
155  tx_buffer_info->dma,
156  tx_buffer_info->length,
157  DMA_TO_DEVICE);
158  else
159  dma_unmap_single(tx_ring->dev,
160  tx_buffer_info->dma,
161  tx_buffer_info->length,
162  DMA_TO_DEVICE);
163  tx_buffer_info->dma = 0;
164  }
165  if (tx_buffer_info->skb) {
166  dev_kfree_skb_any(tx_buffer_info->skb);
167  tx_buffer_info->skb = NULL;
168  }
169  tx_buffer_info->time_stamp = 0;
170  /* tx_buffer_info must be completely set up in the transmit path */
171 }
172 
173 #define IXGBE_MAX_TXD_PWR 14
174 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
175 
176 /* Tx Descriptors needed, worst case */
177 #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
178 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
179 
180 static void ixgbevf_tx_timeout(struct net_device *netdev);
181 
187 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
188  struct ixgbevf_ring *tx_ring)
189 {
190  struct ixgbevf_adapter *adapter = q_vector->adapter;
191  union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
192  struct ixgbevf_tx_buffer *tx_buffer_info;
193  unsigned int i, eop, count = 0;
194  unsigned int total_bytes = 0, total_packets = 0;
195 
196  if (test_bit(__IXGBEVF_DOWN, &adapter->state))
197  return true;
198 
199  i = tx_ring->next_to_clean;
200  eop = tx_ring->tx_buffer_info[i].next_to_watch;
201  eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
202 
203  while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
204  (count < tx_ring->count)) {
205  bool cleaned = false;
206  rmb(); /* read buffer_info after eop_desc */
207  /* eop could change between read and DD-check */
208  if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
209  goto cont_loop;
210  for ( ; !cleaned; count++) {
211  struct sk_buff *skb;
212  tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
213  tx_buffer_info = &tx_ring->tx_buffer_info[i];
214  cleaned = (i == eop);
215  skb = tx_buffer_info->skb;
216 
217  if (cleaned && skb) {
218  unsigned int segs, bytecount;
219 
220  /* gso_segs is currently only valid for tcp */
221  segs = skb_shinfo(skb)->gso_segs ?: 1;
222  /* multiply data chunks by size of headers */
223  bytecount = ((segs - 1) * skb_headlen(skb)) +
224  skb->len;
225  total_packets += segs;
226  total_bytes += bytecount;
227  }
228 
229  ixgbevf_unmap_and_free_tx_resource(tx_ring,
230  tx_buffer_info);
231 
232  tx_desc->wb.status = 0;
233 
234  i++;
235  if (i == tx_ring->count)
236  i = 0;
237  }
238 
239 cont_loop:
240  eop = tx_ring->tx_buffer_info[i].next_to_watch;
241  eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
242  }
243 
244  tx_ring->next_to_clean = i;
245 
246 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
247  if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
248  (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
249  /* Make sure that anybody stopping the queue after this
250  * sees the new next_to_clean.
251  */
252  smp_mb();
253  if (__netif_subqueue_stopped(tx_ring->netdev,
254  tx_ring->queue_index) &&
255  !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
256  netif_wake_subqueue(tx_ring->netdev,
257  tx_ring->queue_index);
258  ++adapter->restart_queue;
259  }
260  }
261 
262  u64_stats_update_begin(&tx_ring->syncp);
263  tx_ring->total_bytes += total_bytes;
264  tx_ring->total_packets += total_packets;
265  u64_stats_update_end(&tx_ring->syncp);
266  q_vector->tx.total_bytes += total_bytes;
267  q_vector->tx.total_packets += total_packets;
268 
269  return count < tx_ring->count;
270 }
271 
279 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
280  struct sk_buff *skb, u8 status,
281  union ixgbe_adv_rx_desc *rx_desc)
282 {
283  struct ixgbevf_adapter *adapter = q_vector->adapter;
284  bool is_vlan = (status & IXGBE_RXD_STAT_VP);
285  u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
286 
287  if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
288  __vlan_hwaccel_put_tag(skb, tag);
289 
290  napi_gro_receive(&q_vector->napi, skb);
291 }
292 
299 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
300  struct ixgbevf_ring *ring,
301  u32 status_err, struct sk_buff *skb)
302 {
303  skb_checksum_none_assert(skb);
304 
305  /* Rx csum disabled */
306  if (!(ring->netdev->features & NETIF_F_RXCSUM))
307  return;
308 
309  /* if IP and error */
310  if ((status_err & IXGBE_RXD_STAT_IPCS) &&
311  (status_err & IXGBE_RXDADV_ERR_IPE)) {
312  adapter->hw_csum_rx_error++;
313  return;
314  }
315 
316  if (!(status_err & IXGBE_RXD_STAT_L4CS))
317  return;
318 
319  if (status_err & IXGBE_RXDADV_ERR_TCPE) {
320  adapter->hw_csum_rx_error++;
321  return;
322  }
323 
324  /* It must be a TCP or UDP packet with a valid checksum */
325  skb->ip_summed = CHECKSUM_UNNECESSARY;
326  adapter->hw_csum_rx_good++;
327 }
328 
333 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
334  struct ixgbevf_ring *rx_ring,
335  int cleaned_count)
336 {
337  struct pci_dev *pdev = adapter->pdev;
338  union ixgbe_adv_rx_desc *rx_desc;
339  struct ixgbevf_rx_buffer *bi;
340  struct sk_buff *skb;
341  unsigned int i = rx_ring->next_to_use;
342 
343  bi = &rx_ring->rx_buffer_info[i];
344 
345  while (cleaned_count--) {
346  rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
347  skb = bi->skb;
348  if (!skb) {
349  skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
350  rx_ring->rx_buf_len);
351  if (!skb) {
352  adapter->alloc_rx_buff_failed++;
353  goto no_buffers;
354  }
355  bi->skb = skb;
356  }
357  if (!bi->dma) {
358  bi->dma = dma_map_single(&pdev->dev, skb->data,
359  rx_ring->rx_buf_len,
360  DMA_FROM_DEVICE);
361  }
362  rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
363 
364  i++;
365  if (i == rx_ring->count)
366  i = 0;
367  bi = &rx_ring->rx_buffer_info[i];
368  }
369 
370 no_buffers:
371  if (rx_ring->next_to_use != i) {
372  rx_ring->next_to_use = i;
373 
374  ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
375  }
376 }
377 
378 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
379  u32 qmask)
380 {
381  struct ixgbe_hw *hw = &adapter->hw;
382 
383  IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
384 }
385 
386 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
387  struct ixgbevf_ring *rx_ring,
388  int budget)
389 {
390  struct ixgbevf_adapter *adapter = q_vector->adapter;
391  struct pci_dev *pdev = adapter->pdev;
392  union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
393  struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
394  struct sk_buff *skb;
395  unsigned int i;
396  u32 len, staterr;
397  int cleaned_count = 0;
398  unsigned int total_rx_bytes = 0, total_rx_packets = 0;
399 
400  i = rx_ring->next_to_clean;
401  rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
402  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
403  rx_buffer_info = &rx_ring->rx_buffer_info[i];
404 
405  while (staterr & IXGBE_RXD_STAT_DD) {
406  if (!budget)
407  break;
408  budget--;
409 
410  rmb(); /* read descriptor and rx_buffer_info after status DD */
411  len = le16_to_cpu(rx_desc->wb.upper.length);
412  skb = rx_buffer_info->skb;
413  prefetch(skb->data - NET_IP_ALIGN);
414  rx_buffer_info->skb = NULL;
415 
416  if (rx_buffer_info->dma) {
417  dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
418  rx_ring->rx_buf_len,
419  DMA_FROM_DEVICE);
420  rx_buffer_info->dma = 0;
421  skb_put(skb, len);
422  }
423 
424  i++;
425  if (i == rx_ring->count)
426  i = 0;
427 
428  next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
429  prefetch(next_rxd);
430  cleaned_count++;
431 
432  next_buffer = &rx_ring->rx_buffer_info[i];
433 
434  if (!(staterr & IXGBE_RXD_STAT_EOP)) {
435  skb->next = next_buffer->skb;
436  IXGBE_CB(skb->next)->prev = skb;
437  adapter->non_eop_descs++;
438  goto next_desc;
439  }
440 
441  /* we should not be chaining buffers, if we did drop the skb */
442  if (IXGBE_CB(skb)->prev) {
443  do {
444  struct sk_buff *this = skb;
445  skb = IXGBE_CB(skb)->prev;
446  dev_kfree_skb(this);
447  } while (skb);
448  goto next_desc;
449  }
450 
451  /* ERR_MASK will only have valid bits if EOP set */
452  if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
453  dev_kfree_skb_irq(skb);
454  goto next_desc;
455  }
456 
457  ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
458 
459  /* probably a little skewed due to removing CRC */
460  total_rx_bytes += skb->len;
461  total_rx_packets++;
462 
463  /*
464  * Work around issue of some types of VM to VM loop back
465  * packets not getting split correctly
466  */
467  if (staterr & IXGBE_RXD_STAT_LB) {
468  u32 header_fixup_len = skb_headlen(skb);
469  if (header_fixup_len < 14)
470  skb_push(skb, header_fixup_len);
471  }
472  skb->protocol = eth_type_trans(skb, rx_ring->netdev);
473 
474  ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
475 
476 next_desc:
477  rx_desc->wb.upper.status_error = 0;
478 
479  /* return some buffers to hardware, one at a time is too slow */
480  if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
481  ixgbevf_alloc_rx_buffers(adapter, rx_ring,
482  cleaned_count);
483  cleaned_count = 0;
484  }
485 
486  /* use prefetched values */
487  rx_desc = next_rxd;
488  rx_buffer_info = &rx_ring->rx_buffer_info[i];
489 
490  staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
491  }
492 
493  rx_ring->next_to_clean = i;
494  cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
495 
496  if (cleaned_count)
497  ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
498 
499  u64_stats_update_begin(&rx_ring->syncp);
500  rx_ring->total_packets += total_rx_packets;
501  rx_ring->total_bytes += total_rx_bytes;
502  u64_stats_update_end(&rx_ring->syncp);
503  q_vector->rx.total_packets += total_rx_packets;
504  q_vector->rx.total_bytes += total_rx_bytes;
505 
506  return !!budget;
507 }
508 
517 static int ixgbevf_poll(struct napi_struct *napi, int budget)
518 {
519  struct ixgbevf_q_vector *q_vector =
520  container_of(napi, struct ixgbevf_q_vector, napi);
521  struct ixgbevf_adapter *adapter = q_vector->adapter;
522  struct ixgbevf_ring *ring;
523  int per_ring_budget;
524  bool clean_complete = true;
525 
526  ixgbevf_for_each_ring(ring, q_vector->tx)
527  clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
528 
529  /* attempt to distribute budget to each queue fairly, but don't allow
530  * the budget to go below 1 because we'll exit polling */
531  if (q_vector->rx.count > 1)
532  per_ring_budget = max(budget/q_vector->rx.count, 1);
533  else
534  per_ring_budget = budget;
535 
536  ixgbevf_for_each_ring(ring, q_vector->rx)
537  clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
538  per_ring_budget);
539 
540  /* If all work not completed, return budget and keep polling */
541  if (!clean_complete)
542  return budget;
543  /* all work done, exit the polling mode */
544  napi_complete(napi);
545  if (adapter->rx_itr_setting & 1)
546  ixgbevf_set_itr(q_vector);
547  if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
548  ixgbevf_irq_enable_queues(adapter,
549  1 << q_vector->v_idx);
550 
551  return 0;
552 }
553 
558 static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
559 {
560  struct ixgbevf_adapter *adapter = q_vector->adapter;
561  struct ixgbe_hw *hw = &adapter->hw;
562  int v_idx = q_vector->v_idx;
563  u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
564 
565  /*
566  * set the WDIS bit to not clear the timer bits and cause an
567  * immediate assertion of the interrupt
568  */
569  itr_reg |= IXGBE_EITR_CNT_WDIS;
570 
571  IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
572 }
573 
581 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
582 {
583  struct ixgbevf_q_vector *q_vector;
584  int q_vectors, v_idx;
585 
586  q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
587  adapter->eims_enable_mask = 0;
588 
589  /*
590  * Populate the IVAR table and set the ITR values to the
591  * corresponding register.
592  */
593  for (v_idx = 0; v_idx < q_vectors; v_idx++) {
594  struct ixgbevf_ring *ring;
595  q_vector = adapter->q_vector[v_idx];
596 
597  ixgbevf_for_each_ring(ring, q_vector->rx)
598  ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
599 
600  ixgbevf_for_each_ring(ring, q_vector->tx)
601  ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
602 
603  if (q_vector->tx.ring && !q_vector->rx.ring) {
604  /* tx only vector */
605  if (adapter->tx_itr_setting == 1)
606  q_vector->itr = IXGBE_10K_ITR;
607  else
608  q_vector->itr = adapter->tx_itr_setting;
609  } else {
610  /* rx or rx/tx vector */
611  if (adapter->rx_itr_setting == 1)
612  q_vector->itr = IXGBE_20K_ITR;
613  else
614  q_vector->itr = adapter->rx_itr_setting;
615  }
616 
617  /* add q_vector eims value to global eims_enable_mask */
618  adapter->eims_enable_mask |= 1 << v_idx;
619 
620  ixgbevf_write_eitr(q_vector);
621  }
622 
623  ixgbevf_set_ivar(adapter, -1, 1, v_idx);
624  /* setup eims_other and add value to global eims_enable_mask */
625  adapter->eims_other = 1 << v_idx;
626  adapter->eims_enable_mask |= adapter->eims_other;
627 }
628 
629 enum latency_range {
630  lowest_latency = 0,
631  low_latency = 1,
632  bulk_latency = 2,
633  latency_invalid = 255
634 };
635 
649 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
650  struct ixgbevf_ring_container *ring_container)
651 {
652  int bytes = ring_container->total_bytes;
653  int packets = ring_container->total_packets;
654  u32 timepassed_us;
655  u64 bytes_perint;
656  u8 itr_setting = ring_container->itr;
657 
658  if (packets == 0)
659  return;
660 
661  /* simple throttlerate management
662  * 0-20MB/s lowest (100000 ints/s)
663  * 20-100MB/s low (20000 ints/s)
664  * 100-1249MB/s bulk (8000 ints/s)
665  */
666  /* what was last interrupt timeslice? */
667  timepassed_us = q_vector->itr >> 2;
668  bytes_perint = bytes / timepassed_us; /* bytes/usec */
669 
670  switch (itr_setting) {
671  case lowest_latency:
672  if (bytes_perint > 10)
673  itr_setting = low_latency;
674  break;
675  case low_latency:
676  if (bytes_perint > 20)
677  itr_setting = bulk_latency;
678  else if (bytes_perint <= 10)
679  itr_setting = lowest_latency;
680  break;
681  case bulk_latency:
682  if (bytes_perint <= 20)
683  itr_setting = low_latency;
684  break;
685  }
686 
687  /* clear work counters since we have the values we need */
688  ring_container->total_bytes = 0;
689  ring_container->total_packets = 0;
690 
691  /* write updated itr to ring container */
692  ring_container->itr = itr_setting;
693 }
694 
695 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
696 {
697  u32 new_itr = q_vector->itr;
698  u8 current_itr;
699 
700  ixgbevf_update_itr(q_vector, &q_vector->tx);
701  ixgbevf_update_itr(q_vector, &q_vector->rx);
702 
703  current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
704 
705  switch (current_itr) {
706  /* counts and packets in update_itr are dependent on these numbers */
707  case lowest_latency:
708  new_itr = IXGBE_100K_ITR;
709  break;
710  case low_latency:
711  new_itr = IXGBE_20K_ITR;
712  break;
713  case bulk_latency:
714  default:
715  new_itr = IXGBE_8K_ITR;
716  break;
717  }
718 
719  if (new_itr != q_vector->itr) {
720  /* do an exponential smoothing */
721  new_itr = (10 * new_itr * q_vector->itr) /
722  ((9 * new_itr) + q_vector->itr);
723 
724  /* save the algorithm value here */
725  q_vector->itr = new_itr;
726 
727  ixgbevf_write_eitr(q_vector);
728  }
729 }
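/* Worked example (editor's illustration, not from the driver itself) of the
 * two routines above, assuming the usual ITR encodings where
 * q_vector->itr >> 2 is the interrupt interval in usecs (e.g. IXGBE_20K_ITR
 * is 200, i.e. 50 usec between interrupts):
 *
 *   3000 bytes cleaned in one 50 usec interval gives
 *     bytes_perint = 3000 / 50 = 60, which is > 20, so a low_latency ring
 *     container is promoted to bulk_latency and ixgbevf_set_itr() targets
 *     IXGBE_8K_ITR (500).
 *
 *   The exponential smoothing then moves the value gradually:
 *     new_itr = (10 * 500 * 200) / ((9 * 500) + 200) = 1000000 / 4700 ~= 212
 *   so the register creeps from the 20K setting toward the 8K setting
 *   instead of jumping there in one step.
 */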
730 
731 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
732 {
733  struct ixgbevf_adapter *adapter = data;
734  struct ixgbe_hw *hw = &adapter->hw;
735 
736  hw->mac.get_link_status = 1;
737 
738  if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
739  mod_timer(&adapter->watchdog_timer, jiffies);
740 
741  IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
742 
743  return IRQ_HANDLED;
744 }
745 
746 
752 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
753 {
754  struct ixgbevf_q_vector *q_vector = data;
755 
756  /* EIAM disabled interrupts (on this vector) for us */
757  if (q_vector->rx.ring || q_vector->tx.ring)
758  napi_schedule(&q_vector->napi);
759 
760  return IRQ_HANDLED;
761 }
762 
763 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
764  int r_idx)
765 {
766  struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
767 
768  a->rx_ring[r_idx].next = q_vector->rx.ring;
769  q_vector->rx.ring = &a->rx_ring[r_idx];
770  q_vector->rx.count++;
771 }
772 
773 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
774  int t_idx)
775 {
776  struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
777 
778  a->tx_ring[t_idx].next = q_vector->tx.ring;
779  q_vector->tx.ring = &a->tx_ring[t_idx];
780  q_vector->tx.count++;
781 }
782 
793 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
794 {
795  int q_vectors;
796  int v_start = 0;
797  int rxr_idx = 0, txr_idx = 0;
798  int rxr_remaining = adapter->num_rx_queues;
799  int txr_remaining = adapter->num_tx_queues;
800  int i, j;
801  int rqpv, tqpv;
802  int err = 0;
803 
804  q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
805 
806  /*
807  * The ideal configuration...
808  * We have enough vectors to map one per queue.
809  */
810  if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
811  for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
812  map_vector_to_rxq(adapter, v_start, rxr_idx);
813 
814  for (; txr_idx < txr_remaining; v_start++, txr_idx++)
815  map_vector_to_txq(adapter, v_start, txr_idx);
816  goto out;
817  }
818 
819  /*
820  * If we don't have enough vectors for a 1-to-1
821  * mapping, we'll have to group them so there are
822  * multiple queues per vector.
823  */
824  /* Re-adjusting *qpv takes care of the remainder. */
825  for (i = v_start; i < q_vectors; i++) {
826  rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
827  for (j = 0; j < rqpv; j++) {
828  map_vector_to_rxq(adapter, i, rxr_idx);
829  rxr_idx++;
830  rxr_remaining--;
831  }
832  }
833  for (i = v_start; i < q_vectors; i++) {
834  tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
835  for (j = 0; j < tqpv; j++) {
836  map_vector_to_txq(adapter, i, txr_idx);
837  txr_idx++;
838  txr_remaining--;
839  }
840  }
841 
842 out:
843  return err;
844 }
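/* Worked example (editor's illustration, not from the driver itself): if the
 * adapter exposed 4 Rx queues but only 3 queue vectors, the DIV_ROUND_UP()
 * pass above would distribute them as
 *   vector 0: DIV_ROUND_UP(4, 3) = 2 rings
 *   vector 1: DIV_ROUND_UP(2, 2) = 1 ring
 *   vector 2: DIV_ROUND_UP(1, 1) = 1 ring
 * so earlier vectors absorb the remainder and no ring is left unmapped.
 */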
845 
853 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
854 {
855  struct net_device *netdev = adapter->netdev;
856  int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
857  int vector, err;
858  int ri = 0, ti = 0;
859 
860  for (vector = 0; vector < q_vectors; vector++) {
861  struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
862  struct msix_entry *entry = &adapter->msix_entries[vector];
863 
864  if (q_vector->tx.ring && q_vector->rx.ring) {
865  snprintf(q_vector->name, sizeof(q_vector->name) - 1,
866  "%s-%s-%d", netdev->name, "TxRx", ri++);
867  ti++;
868  } else if (q_vector->rx.ring) {
869  snprintf(q_vector->name, sizeof(q_vector->name) - 1,
870  "%s-%s-%d", netdev->name, "rx", ri++);
871  } else if (q_vector->tx.ring) {
872  snprintf(q_vector->name, sizeof(q_vector->name) - 1,
873  "%s-%s-%d", netdev->name, "tx", ti++);
874  } else {
875  /* skip this unused q_vector */
876  continue;
877  }
878  err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
879  q_vector->name, q_vector);
880  if (err) {
881  hw_dbg(&adapter->hw,
882  "request_irq failed for MSIX interrupt "
883  "Error: %d\n", err);
884  goto free_queue_irqs;
885  }
886  }
887 
888  err = request_irq(adapter->msix_entries[vector].vector,
889  &ixgbevf_msix_other, 0, netdev->name, adapter);
890  if (err) {
891  hw_dbg(&adapter->hw,
892  "request_irq for msix_other failed: %d\n", err);
893  goto free_queue_irqs;
894  }
895 
896  return 0;
897 
898 free_queue_irqs:
899  while (vector) {
900  vector--;
901  free_irq(adapter->msix_entries[vector].vector,
902  adapter->q_vector[vector]);
903  }
904  pci_disable_msix(adapter->pdev);
905  kfree(adapter->msix_entries);
906  adapter->msix_entries = NULL;
907  return err;
908 }
909 
910 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
911 {
912  int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
913 
914  for (i = 0; i < q_vectors; i++) {
915  struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
916  q_vector->rx.ring = NULL;
917  q_vector->tx.ring = NULL;
918  q_vector->rx.count = 0;
919  q_vector->tx.count = 0;
920  }
921 }
922 
930 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
931 {
932  int err = 0;
933 
934  err = ixgbevf_request_msix_irqs(adapter);
935 
936  if (err)
937  hw_dbg(&adapter->hw,
938  "request_irq failed, Error %d\n", err);
939 
940  return err;
941 }
942 
943 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
944 {
945  int i, q_vectors;
946 
947  q_vectors = adapter->num_msix_vectors;
948  i = q_vectors - 1;
949 
950  free_irq(adapter->msix_entries[i].vector, adapter);
951  i--;
952 
953  for (; i >= 0; i--) {
954  /* free only the irqs that were actually requested */
955  if (!adapter->q_vector[i]->rx.ring &&
956  !adapter->q_vector[i]->tx.ring)
957  continue;
958 
959  free_irq(adapter->msix_entries[i].vector,
960  adapter->q_vector[i]);
961  }
962 
963  ixgbevf_reset_q_vectors(adapter);
964 }
965 
970 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
971 {
972  struct ixgbe_hw *hw = &adapter->hw;
973  int i;
974 
976  IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
978 
979  IXGBE_WRITE_FLUSH(hw);
980 
981  for (i = 0; i < adapter->num_msix_vectors; i++)
982  synchronize_irq(adapter->msix_entries[i].vector);
983 }
984 
989 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
990 {
991  struct ixgbe_hw *hw = &adapter->hw;
992 
993  IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
994  IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
995  IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
996 }
997 
1004 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1005 {
1006  u64 tdba;
1007  struct ixgbe_hw *hw = &adapter->hw;
1008  u32 i, j, tdlen, txctrl;
1009 
1010  /* Setup the HW Tx Head and Tail descriptor pointers */
1011  for (i = 0; i < adapter->num_tx_queues; i++) {
1012  struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1013  j = ring->reg_idx;
1014  tdba = ring->dma;
1015  tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1016  IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1017  (tdba & DMA_BIT_MASK(32)));
1018  IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1019  IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1020  IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1021  IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1022  adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1023  adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1024  /* Disable Tx Head Writeback RO bit, since this hoses
1025  * bookkeeping if things aren't delivered in order.
1026  */
1027  txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1028  txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1029  IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1030  }
1031 }
1032 
1033 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1034 
1035 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1036 {
1037  struct ixgbevf_ring *rx_ring;
1038  struct ixgbe_hw *hw = &adapter->hw;
1039  u32 srrctl;
1040 
1041  rx_ring = &adapter->rx_ring[index];
1042 
1043  srrctl = IXGBE_SRRCTL_DROP_EN;
1044 
 1045  srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 1046 
 1047  srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
 1048  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 1049 
1050  IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1051 }
1052 
1053 static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1054 {
1055  struct ixgbe_hw *hw = &adapter->hw;
1056  struct net_device *netdev = adapter->netdev;
1057  int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1058  int i;
1059  u16 rx_buf_len;
1060 
1061  /* notify the PF of our intent to use this size of frame */
1062  ixgbevf_rlpml_set_vf(hw, max_frame);
1063 
1064  /* PF will allow an extra 4 bytes past for vlan tagged frames */
1065  max_frame += VLAN_HLEN;
1066 
1067  /*
1068  * Make best use of allocation by using all but 1K of a
1069  * power of 2 allocation that will be used for skb->head.
1070  */
1071  if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1072  (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1073  rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1074  else if (max_frame <= IXGBEVF_RXBUFFER_3K)
1075  rx_buf_len = IXGBEVF_RXBUFFER_3K;
1076  else if (max_frame <= IXGBEVF_RXBUFFER_7K)
1077  rx_buf_len = IXGBEVF_RXBUFFER_7K;
1078  else if (max_frame <= IXGBEVF_RXBUFFER_15K)
1079  rx_buf_len = IXGBEVF_RXBUFFER_15K;
1080  else
1081  rx_buf_len = IXGBEVF_MAX_RXBUFFER;
1082 
1083  for (i = 0; i < adapter->num_rx_queues; i++)
1084  adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1085 }
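/* Worked example (editor's illustration, not from the driver itself),
 * assuming the usual "power of 2 minus 1K" buffer constants (3K = 3072,
 * 7K = 7168, 15K = 15360 bytes):
 *
 *   MTU 1500: max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518, plus
 *             VLAN_HLEN = 1522, which matches MAXIMUM_ETHERNET_VLAN_SIZE
 *             on X540 VFs and otherwise lands in IXGBEVF_RXBUFFER_3K.
 *
 *   MTU 9000: max_frame = 9018 + 4 = 9022, which is larger than the 7K
 *             bucket, so every ring gets rx_buf_len = IXGBEVF_RXBUFFER_15K.
 */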
1086 
1093 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1094 {
1095  u64 rdba;
1096  struct ixgbe_hw *hw = &adapter->hw;
1097  int i, j;
1098  u32 rdlen;
1099 
1100  /* PSRTYPE must be initialized in 82599 */
1101  IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1102 
1103  /* set_rx_buffer_len must be called before ring initialization */
1104  ixgbevf_set_rx_buffer_len(adapter);
1105 
1106  rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1107  /* Setup the HW Rx Head and Tail Descriptor Pointers and
1108  * the Base and Length of the Rx Descriptor Ring */
1109  for (i = 0; i < adapter->num_rx_queues; i++) {
1110  rdba = adapter->rx_ring[i].dma;
1111  j = adapter->rx_ring[i].reg_idx;
1112  IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1113  (rdba & DMA_BIT_MASK(32)));
1114  IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1115  IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1116  IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1117  IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1118  adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1119  adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1120 
1121  ixgbevf_configure_srrctl(adapter, j);
1122  }
1123 }
1124 
1125 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1126 {
1127  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1128  struct ixgbe_hw *hw = &adapter->hw;
1129  int err;
1130 
1131  if (!hw->mac.ops.set_vfta)
1132  return -EOPNOTSUPP;
1133 
1134  spin_lock(&adapter->mbx_lock);
1135 
1136  /* add VID to filter table */
1137  err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1138 
1139  spin_unlock(&adapter->mbx_lock);
1140 
1141  /* translate error return types so error makes sense */
1142  if (err == IXGBE_ERR_MBX)
1143  return -EIO;
1144 
1145  if (err == IXGBE_ERR_INVALID_ARGUMENT)
1146  return -EACCES;
1147 
1148  set_bit(vid, adapter->active_vlans);
1149 
1150  return err;
1151 }
1152 
1153 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1154 {
1155  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1156  struct ixgbe_hw *hw = &adapter->hw;
1157  int err = -EOPNOTSUPP;
1158 
1159  spin_lock(&adapter->mbx_lock);
1160 
1161  /* remove VID from filter table */
1162  if (hw->mac.ops.set_vfta)
1163  err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1164 
1165  spin_unlock(&adapter->mbx_lock);
1166 
1167  clear_bit(vid, adapter->active_vlans);
1168 
1169  return err;
1170 }
1171 
1172 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1173 {
1174  u16 vid;
1175 
1176  for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1177  ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1178 }
1179 
1180 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1181 {
1182  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1183  struct ixgbe_hw *hw = &adapter->hw;
1184  int count = 0;
1185 
1186  if ((netdev_uc_count(netdev)) > 10) {
1187  pr_err("Too many unicast filters - No Space\n");
1188  return -ENOSPC;
1189  }
1190 
1191  if (!netdev_uc_empty(netdev)) {
1192  struct netdev_hw_addr *ha;
1193  netdev_for_each_uc_addr(ha, netdev) {
1194  hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1195  udelay(200);
1196  }
1197  } else {
1198  /*
1199  * If the list is empty then send message to PF driver to
1200  * clear all macvlans on this VF.
1201  */
1202  hw->mac.ops.set_uc_addr(hw, 0, NULL);
1203  }
1204 
1205  return count;
1206 }
1207 
1216 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1217 {
1218  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1219  struct ixgbe_hw *hw = &adapter->hw;
1220 
1221  spin_lock(&adapter->mbx_lock);
1222 
1223  /* reprogram multicast list */
1224  if (hw->mac.ops.update_mc_addr_list)
1225  hw->mac.ops.update_mc_addr_list(hw, netdev);
1226 
1227  ixgbevf_write_uc_addr_list(netdev);
1228 
1229  spin_unlock(&adapter->mbx_lock);
1230 }
1231 
1232 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1233 {
1234  int q_idx;
1235  struct ixgbevf_q_vector *q_vector;
1236  int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1237 
1238  for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1239  q_vector = adapter->q_vector[q_idx];
1240  napi_enable(&q_vector->napi);
1241  }
1242 }
1243 
1244 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1245 {
1246  int q_idx;
1247  struct ixgbevf_q_vector *q_vector;
1248  int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1249 
1250  for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1251  q_vector = adapter->q_vector[q_idx];
1252  napi_disable(&q_vector->napi);
1253  }
1254 }
1255 
1256 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1257 {
1258  struct net_device *netdev = adapter->netdev;
1259  int i;
1260 
1261  ixgbevf_set_rx_mode(netdev);
1262 
1263  ixgbevf_restore_vlan(adapter);
1264 
1265  ixgbevf_configure_tx(adapter);
1266  ixgbevf_configure_rx(adapter);
1267  for (i = 0; i < adapter->num_rx_queues; i++) {
1268  struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1269  ixgbevf_alloc_rx_buffers(adapter, ring,
1270  IXGBE_DESC_UNUSED(ring));
1271  }
1272 }
1273 
1274 #define IXGBE_MAX_RX_DESC_POLL 10
1275 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1276  int rxr)
1277 {
1278  struct ixgbe_hw *hw = &adapter->hw;
1279  int j = adapter->rx_ring[rxr].reg_idx;
1280  int k;
1281 
1282  for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1283  if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1284  break;
1285  else
1286  msleep(1);
1287  }
1288  if (k >= IXGBE_MAX_RX_DESC_POLL) {
1289  hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1290  "not set within the polling period\n", rxr);
1291  }
1292 
1293  ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1294  (adapter->rx_ring[rxr].count - 1));
1295 }
1296 
1297 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1298 {
1299  /* Only save pre-reset stats if there are some */
1300  if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1301  adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1302  adapter->stats.base_vfgprc;
1303  adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1304  adapter->stats.base_vfgptc;
1305  adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1306  adapter->stats.base_vfgorc;
1307  adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1308  adapter->stats.base_vfgotc;
1309  adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1310  adapter->stats.base_vfmprc;
1311  }
1312 }
1313 
1314 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1315 {
1316  struct ixgbe_hw *hw = &adapter->hw;
1317 
1318  adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1319  adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1320  adapter->stats.last_vfgorc |=
1321  (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1322  adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1323  adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1324  adapter->stats.last_vfgotc |=
1325  (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1326  adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1327 
1328  adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1329  adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1330  adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1331  adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1332  adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1333 }
1334 
1335 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1336 {
1337  struct ixgbe_hw *hw = &adapter->hw;
1338  int api[] = { ixgbe_mbox_api_10,
1339  ixgbe_mbox_api_unknown };
1340  int err = 0, idx = 0;
1341 
1342  spin_lock(&adapter->mbx_lock);
1343 
1344  while (api[idx] != ixgbe_mbox_api_unknown) {
1345  err = ixgbevf_negotiate_api_version(hw, api[idx]);
1346  if (!err)
1347  break;
1348  idx++;
1349  }
1350 
1351  spin_unlock(&adapter->mbx_lock);
1352 }
1353 
1354 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1355 {
1356  struct net_device *netdev = adapter->netdev;
1357  struct ixgbe_hw *hw = &adapter->hw;
1358  int i, j = 0;
1359  int num_rx_rings = adapter->num_rx_queues;
1360  u32 txdctl, rxdctl;
1361 
1362  for (i = 0; i < adapter->num_tx_queues; i++) {
1363  j = adapter->tx_ring[i].reg_idx;
1364  txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1365  /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1366  txdctl |= (8 << 16);
1367  IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1368  }
1369 
1370  for (i = 0; i < adapter->num_tx_queues; i++) {
1371  j = adapter->tx_ring[i].reg_idx;
1372  txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1373  txdctl |= IXGBE_TXDCTL_ENABLE;
1374  IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1375  }
1376 
1377  for (i = 0; i < num_rx_rings; i++) {
1378  j = adapter->rx_ring[i].reg_idx;
1379  rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1380  rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1381  if (hw->mac.type == ixgbe_mac_X540_vf) {
1382  rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1383  rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1384  IXGBE_RXDCTL_RLPML_EN);
1385  }
1386  IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1387  ixgbevf_rx_desc_queue_enable(adapter, i);
1388  }
1389 
1390  ixgbevf_configure_msix(adapter);
1391 
1392  spin_lock(&adapter->mbx_lock);
1393 
1394  if (hw->mac.ops.set_rar) {
1395  if (is_valid_ether_addr(hw->mac.addr))
1396  hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1397  else
1398  hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1399  }
1400 
1401  spin_unlock(&adapter->mbx_lock);
1402 
1403  clear_bit(__IXGBEVF_DOWN, &adapter->state);
1404  ixgbevf_napi_enable_all(adapter);
1405 
1406  /* enable transmits */
1407  netif_tx_start_all_queues(netdev);
1408 
1409  ixgbevf_save_reset_stats(adapter);
1410  ixgbevf_init_last_counter_stats(adapter);
1411 
1412  hw->mac.get_link_status = 1;
1413  mod_timer(&adapter->watchdog_timer, jiffies);
1414 }
1415 
1416 void ixgbevf_up(struct ixgbevf_adapter *adapter)
1417 {
1418  struct ixgbe_hw *hw = &adapter->hw;
1419 
1420  ixgbevf_negotiate_api(adapter);
1421 
1422  ixgbevf_configure(adapter);
1423 
1424  ixgbevf_up_complete(adapter);
1425 
1426  /* clear any pending interrupts, may auto mask */
1427  IXGBE_READ_REG(hw, IXGBE_VTEICR);
1428 
1429  ixgbevf_irq_enable(adapter);
1430 }
1431 
1437 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1438  struct ixgbevf_ring *rx_ring)
1439 {
1440  struct pci_dev *pdev = adapter->pdev;
1441  unsigned long size;
1442  unsigned int i;
1443 
1444  if (!rx_ring->rx_buffer_info)
1445  return;
1446 
1447  /* Free all the Rx ring sk_buffs */
1448  for (i = 0; i < rx_ring->count; i++) {
1449  struct ixgbevf_rx_buffer *rx_buffer_info;
1450 
1451  rx_buffer_info = &rx_ring->rx_buffer_info[i];
1452  if (rx_buffer_info->dma) {
1453  dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1454  rx_ring->rx_buf_len,
1455  DMA_FROM_DEVICE);
1456  rx_buffer_info->dma = 0;
1457  }
1458  if (rx_buffer_info->skb) {
1459  struct sk_buff *skb = rx_buffer_info->skb;
1460  rx_buffer_info->skb = NULL;
1461  do {
1462  struct sk_buff *this = skb;
1463  skb = IXGBE_CB(skb)->prev;
1464  dev_kfree_skb(this);
1465  } while (skb);
1466  }
1467  }
1468 
1469  size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1470  memset(rx_ring->rx_buffer_info, 0, size);
1471 
1472  /* Zero out the descriptor ring */
1473  memset(rx_ring->desc, 0, rx_ring->size);
1474 
1475  rx_ring->next_to_clean = 0;
1476  rx_ring->next_to_use = 0;
1477 
1478  if (rx_ring->head)
1479  writel(0, adapter->hw.hw_addr + rx_ring->head);
1480  if (rx_ring->tail)
1481  writel(0, adapter->hw.hw_addr + rx_ring->tail);
1482 }
1483 
1489 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1490  struct ixgbevf_ring *tx_ring)
1491 {
1492  struct ixgbevf_tx_buffer *tx_buffer_info;
1493  unsigned long size;
1494  unsigned int i;
1495 
1496  if (!tx_ring->tx_buffer_info)
1497  return;
1498 
1499  /* Free all the Tx ring sk_buffs */
1500 
1501  for (i = 0; i < tx_ring->count; i++) {
1502  tx_buffer_info = &tx_ring->tx_buffer_info[i];
1503  ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1504  }
1505 
1506  size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1507  memset(tx_ring->tx_buffer_info, 0, size);
1508 
1509  memset(tx_ring->desc, 0, tx_ring->size);
1510 
1511  tx_ring->next_to_use = 0;
1512  tx_ring->next_to_clean = 0;
1513 
1514  if (tx_ring->head)
1515  writel(0, adapter->hw.hw_addr + tx_ring->head);
1516  if (tx_ring->tail)
1517  writel(0, adapter->hw.hw_addr + tx_ring->tail);
1518 }
1519 
1524 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1525 {
1526  int i;
1527 
1528  for (i = 0; i < adapter->num_rx_queues; i++)
1529  ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1530 }
1531 
1536 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1537 {
1538  int i;
1539 
1540  for (i = 0; i < adapter->num_tx_queues; i++)
1541  ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1542 }
1543 
1544 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1545 {
1546  struct net_device *netdev = adapter->netdev;
1547  struct ixgbe_hw *hw = &adapter->hw;
1548  u32 txdctl;
1549  int i, j;
1550 
1551  /* signal that we are down to the interrupt handler */
1552  set_bit(__IXGBEVF_DOWN, &adapter->state);
1553  /* disable receives */
1554 
1555  netif_tx_disable(netdev);
1556 
1557  msleep(10);
1558 
1559  netif_tx_stop_all_queues(netdev);
1560 
1561  ixgbevf_irq_disable(adapter);
1562 
1563  ixgbevf_napi_disable_all(adapter);
1564 
1565  del_timer_sync(&adapter->watchdog_timer);
1566  /* can't call flush scheduled work here because it can deadlock
1567  * if linkwatch_event tries to acquire the rtnl_lock which we are
1568  * holding */
1569  while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1570  msleep(1);
1571 
1572  /* disable transmits in the hardware now that interrupts are off */
1573  for (i = 0; i < adapter->num_tx_queues; i++) {
1574  j = adapter->tx_ring[i].reg_idx;
1575  txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1576  IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1577  (txdctl & ~IXGBE_TXDCTL_ENABLE));
1578  }
1579 
1580  netif_carrier_off(netdev);
1581 
1582  if (!pci_channel_offline(adapter->pdev))
1583  ixgbevf_reset(adapter);
1584 
1585  ixgbevf_clean_all_tx_rings(adapter);
1586  ixgbevf_clean_all_rx_rings(adapter);
1587 }
1588 
1589 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1590 {
1591  WARN_ON(in_interrupt());
1592 
1593  while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1594  msleep(1);
1595 
1596  /*
1597  * Check if PF is up before re-init. If not then skip until
1598  * later when the PF is up and ready to service requests from
1599  * the VF via mailbox. If the VF is up and running then the
1600  * watchdog task will continue to schedule reset tasks until
1601  * the PF is up and running.
1602  */
1603  ixgbevf_down(adapter);
1604  ixgbevf_up(adapter);
1605 
1606  clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1607 }
1608 
1609 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1610 {
1611  struct ixgbe_hw *hw = &adapter->hw;
1612  struct net_device *netdev = adapter->netdev;
1613 
1614  spin_lock(&adapter->mbx_lock);
1615 
1616  if (hw->mac.ops.reset_hw(hw))
1617  hw_dbg(hw, "PF still resetting\n");
1618  else
1619  hw->mac.ops.init_hw(hw);
1620 
1621  spin_unlock(&adapter->mbx_lock);
1622 
1623  if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1624  memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1625  netdev->addr_len);
1626  memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1627  netdev->addr_len);
1628  }
1629 }
1630 
1631 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1632  int vectors)
1633 {
1634  int err, vector_threshold;
1635 
1636  /* We'll want at least 2 (vector_threshold):
1637  * 1) TxQ[0] + RxQ[0] handler
1638  * 2) Other (Link Status Change, etc.)
1639  */
1640  vector_threshold = MIN_MSIX_COUNT;
1641 
1642  /* The more we get, the more we will assign to Tx/Rx Cleanup
1643  * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1644  * Right now, we simply care about how many we'll get; we'll
1645  * set them up later while requesting irq's.
1646  */
1647  while (vectors >= vector_threshold) {
1648  err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1649  vectors);
1650  if (!err) /* Success in acquiring all requested vectors. */
1651  break;
1652  else if (err < 0)
1653  vectors = 0; /* Nasty failure, quit now */
1654  else /* err == number of vectors we should try again with */
1655  vectors = err;
1656  }
1657 
1658  if (vectors < vector_threshold) {
1659  /* Can't allocate enough MSI-X interrupts? Oh well.
1660  * This just means we'll go with either a single MSI
1661  * vector or fall back to legacy interrupts.
1662  */
1663  hw_dbg(&adapter->hw,
1664  "Unable to allocate MSI-X interrupts\n");
1665  kfree(adapter->msix_entries);
1666  adapter->msix_entries = NULL;
1667  } else {
1668  /*
1669  * Adjust for only the vectors we'll use, which is minimum
1670  * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1671  * vectors we were allocated.
1672  */
1673  adapter->num_msix_vectors = vectors;
1674  }
1675 }
1676 
1688 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1689 {
1690  /* Start with base case */
1691  adapter->num_rx_queues = 1;
1692  adapter->num_tx_queues = 1;
1693 }
1694 
1703 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1704 {
1705  int i;
1706 
1707  adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1708  sizeof(struct ixgbevf_ring), GFP_KERNEL);
1709  if (!adapter->tx_ring)
1710  goto err_tx_ring_allocation;
1711 
1712  adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1713  sizeof(struct ixgbevf_ring), GFP_KERNEL);
1714  if (!adapter->rx_ring)
1715  goto err_rx_ring_allocation;
1716 
1717  for (i = 0; i < adapter->num_tx_queues; i++) {
1718  adapter->tx_ring[i].count = adapter->tx_ring_count;
1719  adapter->tx_ring[i].queue_index = i;
1720  adapter->tx_ring[i].reg_idx = i;
1721  adapter->tx_ring[i].dev = &adapter->pdev->dev;
1722  adapter->tx_ring[i].netdev = adapter->netdev;
1723  }
1724 
1725  for (i = 0; i < adapter->num_rx_queues; i++) {
1726  adapter->rx_ring[i].count = adapter->rx_ring_count;
1727  adapter->rx_ring[i].queue_index = i;
1728  adapter->rx_ring[i].reg_idx = i;
1729  adapter->rx_ring[i].dev = &adapter->pdev->dev;
1730  adapter->rx_ring[i].netdev = adapter->netdev;
1731  }
1732 
1733  return 0;
1734 
1735 err_rx_ring_allocation:
1736  kfree(adapter->tx_ring);
1737 err_tx_ring_allocation:
1738  return -ENOMEM;
1739 }
1740 
1748 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1749 {
1750  struct net_device *netdev = adapter->netdev;
1751  int err = 0;
1752  int vector, v_budget;
1753 
1754  /*
1755  * It's easy to be greedy for MSI-X vectors, but it really
1756  * doesn't do us much good if we have a lot more vectors
1757  * than CPU's. So let's be conservative and only ask for
1758  * (roughly) the same number of vectors as there are CPU's.
1759  * The default is to use pairs of vectors.
1760  */
1761  v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1762  v_budget = min_t(int, v_budget, num_online_cpus());
1763  v_budget += NON_Q_VECTORS;
1764 
1765  /* A failure in MSI-X entry allocation isn't fatal, but it does
1766  * mean we disable MSI-X capabilities of the adapter. */
1767  adapter->msix_entries = kcalloc(v_budget,
1768  sizeof(struct msix_entry), GFP_KERNEL);
1769  if (!adapter->msix_entries) {
1770  err = -ENOMEM;
1771  goto out;
1772  }
1773 
1774  for (vector = 0; vector < v_budget; vector++)
1775  adapter->msix_entries[vector].entry = vector;
1776 
1777  ixgbevf_acquire_msix_vectors(adapter, v_budget);
1778 
1779  err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1780  if (err)
1781  goto out;
1782 
1783  err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1784 
1785 out:
1786  return err;
1787 }
1788 
1796 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1797 {
1798  int q_idx, num_q_vectors;
1799  struct ixgbevf_q_vector *q_vector;
1800 
1801  num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1802 
1803  for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1804  q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1805  if (!q_vector)
1806  goto err_out;
1807  q_vector->adapter = adapter;
1808  q_vector->v_idx = q_idx;
1809  netif_napi_add(adapter->netdev, &q_vector->napi,
1810  ixgbevf_poll, 64);
1811  adapter->q_vector[q_idx] = q_vector;
1812  }
1813 
1814  return 0;
1815 
1816 err_out:
1817  while (q_idx) {
1818  q_idx--;
1819  q_vector = adapter->q_vector[q_idx];
1820  netif_napi_del(&q_vector->napi);
1821  kfree(q_vector);
1822  adapter->q_vector[q_idx] = NULL;
1823  }
1824  return -ENOMEM;
1825 }
1826 
1835 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1836 {
1837  int q_idx, num_q_vectors;
1838  int napi_vectors;
1839 
1840  num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1841  napi_vectors = adapter->num_rx_queues;
1842 
1843  for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1844  struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1845 
1846  adapter->q_vector[q_idx] = NULL;
1847  if (q_idx < napi_vectors)
1848  netif_napi_del(&q_vector->napi);
1849  kfree(q_vector);
1850  }
1851 }
1852 
1858 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1859 {
1860  pci_disable_msix(adapter->pdev);
1861  kfree(adapter->msix_entries);
1862  adapter->msix_entries = NULL;
1863 }
1864 
1870 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1871 {
1872  int err;
1873 
1874  /* Number of supported queues */
1875  ixgbevf_set_num_queues(adapter);
1876 
1877  err = ixgbevf_set_interrupt_capability(adapter);
1878  if (err) {
1879  hw_dbg(&adapter->hw,
1880  "Unable to setup interrupt capabilities\n");
1881  goto err_set_interrupt;
1882  }
1883 
1884  err = ixgbevf_alloc_q_vectors(adapter);
1885  if (err) {
1886  hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1887  "vectors\n");
1888  goto err_alloc_q_vectors;
1889  }
1890 
1891  err = ixgbevf_alloc_queues(adapter);
1892  if (err) {
1893  pr_err("Unable to allocate memory for queues\n");
1894  goto err_alloc_queues;
1895  }
1896 
1897  hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1898  "Tx Queue count = %u\n",
1899  (adapter->num_rx_queues > 1) ? "Enabled" :
1900  "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1901 
1902  set_bit(__IXGBEVF_DOWN, &adapter->state);
1903 
1904  return 0;
1905 err_alloc_queues:
1906  ixgbevf_free_q_vectors(adapter);
1907 err_alloc_q_vectors:
1908  ixgbevf_reset_interrupt_capability(adapter);
1909 err_set_interrupt:
1910  return err;
1911 }
1912 
1920 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1921 {
1922  adapter->num_tx_queues = 0;
1923  adapter->num_rx_queues = 0;
1924 
1925  ixgbevf_free_q_vectors(adapter);
1926  ixgbevf_reset_interrupt_capability(adapter);
1927 }
1928 
1938 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1939 {
1940  struct ixgbe_hw *hw = &adapter->hw;
1941  struct pci_dev *pdev = adapter->pdev;
1942  int err;
1943 
1944  /* PCI config space info */
1945 
1946  hw->vendor_id = pdev->vendor;
1947  hw->device_id = pdev->device;
1948  hw->revision_id = pdev->revision;
1949  hw->subsystem_vendor_id = pdev->subsystem_vendor;
1950  hw->subsystem_device_id = pdev->subsystem_device;
1951 
1952  hw->mbx.ops.init_params(hw);
1953  hw->mac.max_tx_queues = MAX_TX_QUEUES;
1954  hw->mac.max_rx_queues = MAX_RX_QUEUES;
1955  err = hw->mac.ops.reset_hw(hw);
1956  if (err) {
1957  dev_info(&pdev->dev,
1958  "PF still in reset state, assigning new address\n");
1959  eth_hw_addr_random(adapter->netdev);
1960  memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
1961  adapter->netdev->addr_len);
1962  } else {
1963  err = hw->mac.ops.init_hw(hw);
1964  if (err) {
1965  pr_err("init_shared_code failed: %d\n", err);
1966  goto out;
1967  }
1968  memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
1969  adapter->netdev->addr_len);
1970  }
1971 
1972  /* lock to protect mailbox accesses */
1973  spin_lock_init(&adapter->mbx_lock);
1974 
1975  /* Enable dynamic interrupt throttling rates */
1976  adapter->rx_itr_setting = 1;
1977  adapter->tx_itr_setting = 1;
1978 
1979  /* set default ring sizes */
1980  adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
1981  adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
1982 
1983  set_bit(__IXGBEVF_DOWN, &adapter->state);
1984  return 0;
1985 
1986 out:
1987  return err;
1988 }
1989 
1990 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
1991  { \
1992  u32 current_counter = IXGBE_READ_REG(hw, reg); \
1993  if (current_counter < last_counter) \
1994  counter += 0x100000000LL; \
1995  last_counter = current_counter; \
1996  counter &= 0xFFFFFFFF00000000LL; \
1997  counter |= current_counter; \
1998  }
1999 
2000 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2001  { \
2002  u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2003  u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2004  u64 current_counter = (current_counter_msb << 32) | \
2005  current_counter_lsb; \
2006  if (current_counter < last_counter) \
2007  counter += 0x1000000000LL; \
2008  last_counter = current_counter; \
2009  counter &= 0xFFFFFFF000000000LL; \
2010  counter |= current_counter; \
2011  }
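/* Worked example (editor's illustration, not from the driver itself): the VF
 * good-octet counters are only 36 bits wide in hardware, so the macro above
 * splices each fresh reading into a running 64-bit software counter and
 * detects wrap by comparing against the previous reading, e.g.
 *
 *   last_counter    = 0xFFFFFF000 (near the 36-bit limit)
 *   current_counter = 0x000000800 (hardware wrapped)
 *   -> current < last, so 0x1000000000 (2^36) is added before the low
 *      36 bits are replaced with the new reading.
 */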
2012 
2016 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2017 {
2018  struct ixgbe_hw *hw = &adapter->hw;
2019 
2020  UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2021  adapter->stats.vfgprc);
2022  UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2023  adapter->stats.vfgptc);
2024  UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2025  adapter->stats.last_vfgorc,
2026  adapter->stats.vfgorc);
2027  UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2028  adapter->stats.last_vfgotc,
2029  adapter->stats.vfgotc);
2030  UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2031  adapter->stats.vfmprc);
2032 }
2033 
2038 static void ixgbevf_watchdog(unsigned long data)
2039 {
2040  struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2041  struct ixgbe_hw *hw = &adapter->hw;
2042  u32 eics = 0;
2043  int i;
2044 
2045  /*
2046  * Do the watchdog outside of interrupt context due to the lovely
2047  * delays that some of the newer hardware requires
2048  */
2049 
2050  if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2051  goto watchdog_short_circuit;
2052 
2053  /* get one bit for every active tx/rx interrupt vector */
2054  for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2055  struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2056  if (qv->rx.ring || qv->tx.ring)
2057  eics |= 1 << i;
2058  }
2059 
2060  IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2061 
2062 watchdog_short_circuit:
2063  schedule_work(&adapter->watchdog_task);
2064 }
2065 
2070 static void ixgbevf_tx_timeout(struct net_device *netdev)
2071 {
2072  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2073 
2074  /* Do the reset outside of interrupt context */
2075  schedule_work(&adapter->reset_task);
2076 }
2077 
2078 static void ixgbevf_reset_task(struct work_struct *work)
2079 {
2080  struct ixgbevf_adapter *adapter;
2081  adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2082 
2083  /* If we're already down or resetting, just bail */
2084  if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2085  test_bit(__IXGBEVF_RESETTING, &adapter->state))
2086  return;
2087 
2088  adapter->tx_timeout_count++;
2089 
2090  ixgbevf_reinit_locked(adapter);
2091 }
2092 
2097 static void ixgbevf_watchdog_task(struct work_struct *work)
2098 {
2099  struct ixgbevf_adapter *adapter = container_of(work,
2100  struct ixgbevf_adapter,
2101  watchdog_task);
2102  struct net_device *netdev = adapter->netdev;
2103  struct ixgbe_hw *hw = &adapter->hw;
2104  u32 link_speed = adapter->link_speed;
2105  bool link_up = adapter->link_up;
2106 
2107  adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2108 
2109  /*
2110  * Always check the link on the watchdog because we have
2111  * no LSC interrupt
2112  */
2113  if (hw->mac.ops.check_link) {
2114  s32 need_reset;
2115 
2116  spin_lock(&adapter->mbx_lock);
2117 
2118  need_reset = hw->mac.ops.check_link(hw, &link_speed,
2119  &link_up, false);
2120 
2121  spin_unlock(&adapter->mbx_lock);
2122 
2123  if (need_reset) {
2124  adapter->link_up = link_up;
2125  adapter->link_speed = link_speed;
2126  netif_carrier_off(netdev);
2127  netif_tx_stop_all_queues(netdev);
2128  schedule_work(&adapter->reset_task);
2129  goto pf_has_reset;
2130  }
2131  } else {
2132  /* always assume link is up, if no check link
2133  * function */
2134  link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2135  link_up = true;
2136  }
2137  adapter->link_up = link_up;
2138  adapter->link_speed = link_speed;
2139 
2140  if (link_up) {
2141  if (!netif_carrier_ok(netdev)) {
2142  hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2143  (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2144  10 : 1);
2145  netif_carrier_on(netdev);
2146  netif_tx_wake_all_queues(netdev);
2147  }
2148  } else {
2149  adapter->link_up = false;
2150  adapter->link_speed = 0;
2151  if (netif_carrier_ok(netdev)) {
2152  hw_dbg(&adapter->hw, "NIC Link is Down\n");
2153  netif_carrier_off(netdev);
2154  netif_tx_stop_all_queues(netdev);
2155  }
2156  }
2157 
2158  ixgbevf_update_stats(adapter);
2159 
2160 pf_has_reset:
2161  /* Reset the timer */
2162  if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2163  mod_timer(&adapter->watchdog_timer,
2164  round_jiffies(jiffies + (2 * HZ)));
2165 
2166  adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2167 }
2168 
2169 /**
2170  * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2171  * @adapter: board private structure
2172  * @tx_ring: Tx descriptor ring for a specific queue
2173  *
2174  * Free all transmit software resources
2175  **/
2176 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2177  struct ixgbevf_ring *tx_ring)
2178 {
2179  struct pci_dev *pdev = adapter->pdev;
2180 
2181  ixgbevf_clean_tx_ring(adapter, tx_ring);
2182 
2183  vfree(tx_ring->tx_buffer_info);
2184  tx_ring->tx_buffer_info = NULL;
2185 
2186  dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2187  tx_ring->dma);
2188 
2189  tx_ring->desc = NULL;
2190 }
2191 
2192 /**
2193  * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2194  * @adapter: board private structure
2195  *
2196  * Free all transmit software resources
2197  **/
2198 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2199 {
2200  int i;
2201 
2202  for (i = 0; i < adapter->num_tx_queues; i++)
2203  if (adapter->tx_ring[i].desc)
2204  ixgbevf_free_tx_resources(adapter,
2205  &adapter->tx_ring[i]);
2206 
2207 }
2208 
2209 /**
2210  * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2211  * @adapter: board private structure
2212  * @tx_ring: tx descriptor ring (for a specific queue) to setup
2213  *
2214  * Return 0 on success, negative on failure
2215  **/
2216 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2217  struct ixgbevf_ring *tx_ring)
2218 {
2219  struct pci_dev *pdev = adapter->pdev;
2220  int size;
2221 
2222  size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2223  tx_ring->tx_buffer_info = vzalloc(size);
2224  if (!tx_ring->tx_buffer_info)
2225  goto err;
2226 
2227  /* round up to nearest 4K */
2228  tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2229  tx_ring->size = ALIGN(tx_ring->size, 4096);
2230 
2231  tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2232  &tx_ring->dma, GFP_KERNEL);
2233  if (!tx_ring->desc)
2234  goto err;
2235 
2236  tx_ring->next_to_use = 0;
2237  tx_ring->next_to_clean = 0;
2238  return 0;
2239 
2240 err:
2241  vfree(tx_ring->tx_buffer_info);
2242  tx_ring->tx_buffer_info = NULL;
2243  hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2244  "descriptor ring\n");
2245  return -ENOMEM;
2246 }
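The ring size computed above is rounded up to a 4 KiB boundary before dma_alloc_coherent(). The kernel's ALIGN() is plain power-of-two round-up; a standalone sketch of the arithmetic, assuming 16-byte advanced descriptors purely for the example numbers:

#include <stdint.h>
#include <stdio.h>

/* Power-of-two round-up, same effect as the kernel's ALIGN(x, a). */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* assuming 16-byte descriptors: 512 * 16 is already 4K aligned */
	printf("%llu\n", (unsigned long long)align_up(512 * 16, 4096)); /* 8192 */
	/* 100 * 16 = 1600 bytes still consumes a full 4K page */
	printf("%llu\n", (unsigned long long)align_up(100 * 16, 4096)); /* 4096 */
	return 0;
}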
2247 
2248 /**
2249  * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2250  * @adapter: board private structure
2251  *
2252  * If this function returns with an error, then it's possible one or
2253  * more of the rings is populated (while the rest are not).  It is the
2254  * caller's duty to clean those orphaned rings.
2255  *
2256  * Return 0 on success, negative on failure
2257  **/
2258 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2259 {
2260  int i, err = 0;
2261 
2262  for (i = 0; i < adapter->num_tx_queues; i++) {
2263  err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2264  if (!err)
2265  continue;
2266  hw_dbg(&adapter->hw,
2267  "Allocation for Tx Queue %u failed\n", i);
2268  break;
2269  }
2270 
2271  return err;
2272 }
2273 
2274 /**
2275  * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2276  * @adapter: board private structure
2277  * @rx_ring: rx descriptor ring (for a specific queue) to setup
2278  *
2279  * Returns 0 on success, negative on failure
2280  **/
2281 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2282  struct ixgbevf_ring *rx_ring)
2283 {
2284  struct pci_dev *pdev = adapter->pdev;
2285  int size;
2286 
2287  size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2288  rx_ring->rx_buffer_info = vzalloc(size);
2289  if (!rx_ring->rx_buffer_info)
2290  goto alloc_failed;
2291 
2292  /* Round up to nearest 4K */
2293  rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2294  rx_ring->size = ALIGN(rx_ring->size, 4096);
2295 
2296  rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2297  &rx_ring->dma, GFP_KERNEL);
2298 
2299  if (!rx_ring->desc) {
2300  hw_dbg(&adapter->hw,
2301  "Unable to allocate memory for "
2302  "the receive descriptor ring\n");
2303  vfree(rx_ring->rx_buffer_info);
2304  rx_ring->rx_buffer_info = NULL;
2305  goto alloc_failed;
2306  }
2307 
2308  rx_ring->next_to_clean = 0;
2309  rx_ring->next_to_use = 0;
2310 
2311  return 0;
2312 alloc_failed:
2313  return -ENOMEM;
2314 }
2315 
2316 /**
2317  * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2318  * @adapter: board private structure
2319  *
2320  * If this function returns with an error, then it's possible one or
2321  * more of the rings is populated (while the rest are not).  It is the
2322  * caller's duty to clean those orphaned rings.
2323  *
2324  * Return 0 on success, negative on failure
2325  **/
2326 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2327 {
2328  int i, err = 0;
2329 
2330  for (i = 0; i < adapter->num_rx_queues; i++) {
2331  err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2332  if (!err)
2333  continue;
2334  hw_dbg(&adapter->hw,
2335  "Allocation for Rx Queue %u failed\n", i);
2336  break;
2337  }
2338  return err;
2339 }
2340 
2341 /**
2342  * ixgbevf_free_rx_resources - Free Rx Resources
2343  * @adapter: board private structure
2344  * @rx_ring: ring to clean the resources from
2345  *
2346  * Free all receive software resources
2347  **/
2348 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2349  struct ixgbevf_ring *rx_ring)
2350 {
2351  struct pci_dev *pdev = adapter->pdev;
2352 
2353  ixgbevf_clean_rx_ring(adapter, rx_ring);
2354 
2355  vfree(rx_ring->rx_buffer_info);
2356  rx_ring->rx_buffer_info = NULL;
2357 
2358  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2359  rx_ring->dma);
2360 
2361  rx_ring->desc = NULL;
2362 }
2363 
2364 /**
2365  * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2366  * @adapter: board private structure
2367  *
2368  * Free all receive software resources
2369  **/
2370 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2371 {
2372  int i;
2373 
2374  for (i = 0; i < adapter->num_rx_queues; i++)
2375  if (adapter->rx_ring[i].desc)
2376  ixgbevf_free_rx_resources(adapter,
2377  &adapter->rx_ring[i]);
2378 }
2379 
2380 /**
2381  * ixgbevf_open - Called when a network interface is made active
2382  * @netdev: network interface device structure
2383  *
2384  * Returns 0 on success, negative value on failure
2385  *
2386  * The open entry point is called when a network interface is made
2387  * active by the system (IFF_UP).  At this point all resources needed
2388  * for transmit and receive operations are allocated, the interrupt
2389  * handler is registered with the OS, the watchdog timer is started,
2390  * and the stack is notified that the interface is ready.
2391  **/
2392 static int ixgbevf_open(struct net_device *netdev)
2393 {
2394  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2395  struct ixgbe_hw *hw = &adapter->hw;
2396  int err;
2397 
2398  /* disallow open during test */
2399  if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2400  return -EBUSY;
2401 
2402  if (hw->adapter_stopped) {
2403  ixgbevf_reset(adapter);
2404  /* if adapter is still stopped then PF isn't up and
2405  * the vf can't start. */
2406  if (hw->adapter_stopped) {
2407  err = IXGBE_ERR_MBX;
2408  pr_err("Unable to start - perhaps the PF Driver isn't "
2409  "up yet\n");
2410  goto err_setup_reset;
2411  }
2412  }
2413 
2414  ixgbevf_negotiate_api(adapter);
2415 
2416  /* allocate transmit descriptors */
2417  err = ixgbevf_setup_all_tx_resources(adapter);
2418  if (err)
2419  goto err_setup_tx;
2420 
2421  /* allocate receive descriptors */
2422  err = ixgbevf_setup_all_rx_resources(adapter);
2423  if (err)
2424  goto err_setup_rx;
2425 
2426  ixgbevf_configure(adapter);
2427 
2428  /*
2429  * Map the Tx/Rx rings to the vectors we were allotted.
2430  * if request_irq will be called in this function map_rings
2431  * must be called *before* up_complete
2432  */
2433  ixgbevf_map_rings_to_vectors(adapter);
2434 
2435  ixgbevf_up_complete(adapter);
2436 
2437  /* clear any pending interrupts, may auto mask */
2438  IXGBE_READ_REG(hw, IXGBE_VTEICR);
2439  err = ixgbevf_request_irq(adapter);
2440  if (err)
2441  goto err_req_irq;
2442 
2443  ixgbevf_irq_enable(adapter);
2444 
2445  return 0;
2446 
2447 err_req_irq:
2448  ixgbevf_down(adapter);
2449  ixgbevf_free_irq(adapter);
2450 err_setup_rx:
2451  ixgbevf_free_all_rx_resources(adapter);
2452 err_setup_tx:
2453  ixgbevf_free_all_tx_resources(adapter);
2454  ixgbevf_reset(adapter);
2455 
2456 err_setup_reset:
2457 
2458  return err;
2459 }
2460 
2461 /**
2462  * ixgbevf_close - Disables a network interface
2463  * @netdev: network interface device structure
2464  *
2465  * Returns 0, this is not allowed to fail
2466  *
2467  * The close entry point is called when an interface is de-activated
2468  * by the OS.  The hardware is still under the drivers control, but
2469  * needs to be disabled.  A global MAC reset is issued to stop the
2470  * hardware, and all transmit and receive resources are freed.
2471  **/
2472 static int ixgbevf_close(struct net_device *netdev)
2473 {
2474  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2475 
2476  ixgbevf_down(adapter);
2477  ixgbevf_free_irq(adapter);
2478 
2479  ixgbevf_free_all_tx_resources(adapter);
2480  ixgbevf_free_all_rx_resources(adapter);
2481 
2482  return 0;
2483 }
2484 
2485 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2486  u32 vlan_macip_lens, u32 type_tucmd,
2487  u32 mss_l4len_idx)
2488 {
2489  struct ixgbe_adv_tx_context_desc *context_desc;
2490  u16 i = tx_ring->next_to_use;
2491 
2492  context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2493 
2494  i++;
2495  tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2496 
2497  /* set bits to identify this as an advanced context descriptor */
2498  type_tucmd |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2499 
2500  context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2501  context_desc->seqnum_seed = 0;
2502  context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2503  context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2504 }
2505 
2506 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2507  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2508 {
2509  u32 vlan_macip_lens, type_tucmd;
2510  u32 mss_l4len_idx, l4len;
2511 
2512  if (!skb_is_gso(skb))
2513  return 0;
2514 
2515  if (skb_header_cloned(skb)) {
2516  int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2517  if (err)
2518  return err;
2519  }
2520 
2521  /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2522  type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2523 
2524  if (skb->protocol == htons(ETH_P_IP)) {
2525  struct iphdr *iph = ip_hdr(skb);
2526  iph->tot_len = 0;
2527  iph->check = 0;
2528  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2529  iph->daddr, 0,
2530  IPPROTO_TCP,
2531  0);
2532  type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2533  } else if (skb_is_gso_v6(skb)) {
2534  ipv6_hdr(skb)->payload_len = 0;
2535  tcp_hdr(skb)->check =
2536  ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2537  &ipv6_hdr(skb)->daddr,
2538  0, IPPROTO_TCP, 0);
2539  }
2540 
2541  /* compute header lengths */
2542  l4len = tcp_hdrlen(skb);
2543  *hdr_len += l4len;
2544  *hdr_len = skb_transport_offset(skb) + l4len;
2545 
2546  /* mss_l4len_id: use 1 as index for TSO */
2547  mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2548  mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2549  mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
2550 
2551  /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2552  vlan_macip_lens = skb_network_header_len(skb);
2553  vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2554  vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2555 
2556  ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2557  type_tucmd, mss_l4len_idx);
2558 
2559  return 1;
2560 }
2561 
2562 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2563  struct sk_buff *skb, u32 tx_flags)
2564 {
2565 
2566 
2567 
2568  u32 vlan_macip_lens = 0;
2569  u32 mss_l4len_idx = 0;
2570  u32 type_tucmd = 0;
2571 
2572  if (skb->ip_summed == CHECKSUM_PARTIAL) {
2573  u8 l4_hdr = 0;
2574  switch (skb->protocol) {
2575  case __constant_htons(ETH_P_IP):
2576  vlan_macip_lens |= skb_network_header_len(skb);
2577  type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2578  l4_hdr = ip_hdr(skb)->protocol;
2579  break;
2580  case __constant_htons(ETH_P_IPV6):
2581  vlan_macip_lens |= skb_network_header_len(skb);
2582  l4_hdr = ipv6_hdr(skb)->nexthdr;
2583  break;
2584  default:
2585  if (unlikely(net_ratelimit())) {
2586  dev_warn(tx_ring->dev,
2587  "partial checksum but proto=%x!\n",
2588  skb->protocol);
2589  }
2590  break;
2591  }
2592 
2593  switch (l4_hdr) {
2594  case IPPROTO_TCP:
2595  type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2596  mss_l4len_idx = tcp_hdrlen(skb) <<
2597  IXGBE_ADVTXD_L4LEN_SHIFT;
2598  break;
2599  case IPPROTO_SCTP:
2600  type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2601  mss_l4len_idx = sizeof(struct sctphdr) <<
2602  IXGBE_ADVTXD_L4LEN_SHIFT;
2603  break;
2604  case IPPROTO_UDP:
2605  mss_l4len_idx = sizeof(struct udphdr) <<
2606  IXGBE_ADVTXD_L4LEN_SHIFT;
2607  break;
2608  default:
2609  if (unlikely(net_ratelimit())) {
2610  dev_warn(tx_ring->dev,
2611  "partial checksum but l4 proto=%x!\n",
2612  l4_hdr);
2613  }
2614  break;
2615  }
2616  }
2617 
2618  /* vlan_macip_lens: MACLEN, VLAN tag */
2619  vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2620  vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2621 
2622  ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2623  type_tucmd, mss_l4len_idx);
2624 
2625  return (skb->ip_summed == CHECKSUM_PARTIAL);
2626 }
2627 
2628 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2629  struct sk_buff *skb, u32 tx_flags,
2630  unsigned int first)
2631 {
2632  struct ixgbevf_tx_buffer *tx_buffer_info;
2633  unsigned int len;
2634  unsigned int total = skb->len;
2635  unsigned int offset = 0, size;
2636  int count = 0;
2637  unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2638  unsigned int f;
2639  int i;
2640 
2641  i = tx_ring->next_to_use;
2642 
2643  len = min(skb_headlen(skb), total);
2644  while (len) {
2645  tx_buffer_info = &tx_ring->tx_buffer_info[i];
2646  size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2647 
2648  tx_buffer_info->length = size;
2649  tx_buffer_info->mapped_as_page = false;
2650  tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2651  skb->data + offset,
2652  size, DMA_TO_DEVICE);
2653  if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2654  goto dma_error;
2655  tx_buffer_info->next_to_watch = i;
2656 
2657  len -= size;
2658  total -= size;
2659  offset += size;
2660  count++;
2661  i++;
2662  if (i == tx_ring->count)
2663  i = 0;
2664  }
2665 
2666  for (f = 0; f < nr_frags; f++) {
2667  const struct skb_frag_struct *frag;
2668 
2669  frag = &skb_shinfo(skb)->frags[f];
2670  len = min((unsigned int)skb_frag_size(frag), total);
2671  offset = 0;
2672 
2673  while (len) {
2674  tx_buffer_info = &tx_ring->tx_buffer_info[i];
2675  size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2676 
2677  tx_buffer_info->length = size;
2678  tx_buffer_info->dma =
2679  skb_frag_dma_map(tx_ring->dev, frag,
2680  offset, size, DMA_TO_DEVICE);
2681  tx_buffer_info->mapped_as_page = true;
2682  if (dma_mapping_error(tx_ring->dev,
2683  tx_buffer_info->dma))
2684  goto dma_error;
2685  tx_buffer_info->next_to_watch = i;
2686 
2687  len -= size;
2688  total -= size;
2689  offset += size;
2690  count++;
2691  i++;
2692  if (i == tx_ring->count)
2693  i = 0;
2694  }
2695  if (total == 0)
2696  break;
2697  }
2698 
2699  if (i == 0)
2700  i = tx_ring->count - 1;
2701  else
2702  i = i - 1;
2703  tx_ring->tx_buffer_info[i].skb = skb;
2704  tx_ring->tx_buffer_info[first].next_to_watch = i;
2705  tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2706 
2707  return count;
2708 
2709 dma_error:
2710  dev_err(tx_ring->dev, "TX DMA map failed\n");
2711 
2712  /* clear timestamp and dma mappings for failed tx_buffer_info map */
2713  tx_buffer_info->dma = 0;
2714  tx_buffer_info->next_to_watch = 0;
2715  count--;
2716 
2717  /* clear timestamp and dma mappings for remaining portion of packet */
2718  while (count >= 0) {
2719  count--;
2720  i--;
2721  if (i < 0)
2722  i += tx_ring->count;
2723  tx_buffer_info = &tx_ring->tx_buffer_info[i];
2724  ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2725  }
2726 
2727  return count;
2728 }
2729 
2730 static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2731  int count, u32 paylen, u8 hdr_len)
2732 {
2733  union ixgbe_adv_tx_desc *tx_desc = NULL;
2734  struct ixgbevf_tx_buffer *tx_buffer_info;
2735  u32 olinfo_status = 0, cmd_type_len = 0;
2736  unsigned int i;
2737 
2738  u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2739 
2740  cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2741 
2742  cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2743 
2744  if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2745  cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2746 
2747  if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2748  olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2749 
2750  if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2751  cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2752 
2753  /* use index 1 context for tso */
2754  olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2755  if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2756  olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2757 
2758  }
2759 
2760  /*
2761  * Check Context must be set if Tx switch is enabled, which it
2762  * always is for case where virtual functions are running
2763  */
2764  olinfo_status |= IXGBE_ADVTXD_CC;
2765 
2766  olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2767 
2768  i = tx_ring->next_to_use;
2769  while (count--) {
2770  tx_buffer_info = &tx_ring->tx_buffer_info[i];
2771  tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2772  tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2773  tx_desc->read.cmd_type_len =
2774  cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2775  tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2776  i++;
2777  if (i == tx_ring->count)
2778  i = 0;
2779  }
2780 
2781  tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2782 
2783  tx_ring->next_to_use = i;
2784 }
2785 
2786 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2787 {
2788  struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
2789 
2790  netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2791  /* Herbert's original patch had:
2792  * smp_mb__after_netif_stop_queue();
2793  * but since that doesn't exist yet, just open code it. */
2794  smp_mb();
2795 
2796  /* We need to check again in a case another CPU has just
2797  * made room available. */
2798  if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2799  return -EBUSY;
2800 
2801  /* A reprieve! - use start_queue because it doesn't call schedule */
2802  netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2803  ++adapter->restart_queue;
2804  return 0;
2805 }
2806 
2807 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
2808 {
2809  if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2810  return 0;
2811  return __ixgbevf_maybe_stop_tx(tx_ring, size);
2812 }
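Together these two helpers implement the usual stop-then-recheck pattern: stop the subqueue, use a memory barrier so the second occupancy check cannot be reordered before the stop, and restart the queue if the cleanup path freed descriptors in the meantime. A toy single-threaded sketch of the control flow; struct toy_ring, ring_unused() and maybe_stop_tx() are illustrative stand-ins, not driver API:

#include <stdbool.h>
#include <stdio.h>

/* Toy ring used only to illustrate the pattern; none of this is driver API. */
struct toy_ring {
	unsigned int count, next_to_use, next_to_clean;
	bool stopped;
};

/* Free slots, mirroring the effect of IXGBE_DESC_UNUSED(). */
static unsigned int ring_unused(const struct toy_ring *r)
{
	unsigned int wrap = (r->next_to_clean > r->next_to_use) ? 0 : r->count;

	return wrap + r->next_to_clean - r->next_to_use - 1;
}

static int maybe_stop_tx(struct toy_ring *r, unsigned int needed)
{
	if (ring_unused(r) >= needed)
		return 0;

	r->stopped = true;
	/* the driver puts an smp_mb() here so the recheck below cannot be
	 * reordered before the queue is marked stopped */
	if (ring_unused(r) < needed)
		return -1;		/* still full: stay stopped */

	r->stopped = false;		/* cleanup freed space meanwhile */
	return 0;
}

int main(void)
{
	struct toy_ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 0 };

	printf("stop? %d (unused = %u)\n", maybe_stop_tx(&r, 4), ring_unused(&r));
	return 0;
}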
2813 
2814 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2815 {
2816  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2817  struct ixgbevf_ring *tx_ring;
2818  unsigned int first;
2819  unsigned int tx_flags = 0;
2820  u8 hdr_len = 0;
2821  int r_idx = 0, tso;
2822  u16 count = TXD_USE_COUNT(skb_headlen(skb));
2823 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2824  unsigned short f;
2825 #endif
2826 
2827  tx_ring = &adapter->tx_ring[r_idx];
2828 
2829  /*
2830  * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2831  * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2832  * + 2 desc gap to keep tail from touching head,
2833  * + 1 desc for context descriptor,
2834  * otherwise try next time
2835  */
2836 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2837  for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2838  count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2839 #else
2840  count += skb_shinfo(skb)->nr_frags;
2841 #endif
2842  if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
2843  adapter->tx_busy++;
2844  return NETDEV_TX_BUSY;
2845  }
2846 
2847  if (vlan_tx_tag_present(skb)) {
2848  tx_flags |= vlan_tx_tag_get(skb);
2849  tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2850  tx_flags |= IXGBE_TX_FLAGS_VLAN;
2851  }
2852 
2853  first = tx_ring->next_to_use;
2854 
2855  if (skb->protocol == htons(ETH_P_IP))
2856  tx_flags |= IXGBE_TX_FLAGS_IPV4;
2857  tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
2858  if (tso < 0) {
2859  dev_kfree_skb_any(skb);
2860  return NETDEV_TX_OK;
2861  }
2862 
2863  if (tso)
2864  tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
2865  else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
2866  tx_flags |= IXGBE_TX_FLAGS_CSUM;
2867 
2868  ixgbevf_tx_queue(tx_ring, tx_flags,
2869  ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
2870  skb->len, hdr_len);
2871  /*
2872  * Force memory writes to complete before letting h/w
2873  * know there are new descriptors to fetch. (Only
2874  * applicable for weak-ordered memory model archs,
2875  * such as IA-64).
2876  */
2877  wmb();
2878 
2879  writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
2880 
2881  ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2882 
2883  return NETDEV_TX_OK;
2884 }
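The descriptor budget computed in ixgbevf_xmit_frame() is the head length plus every fragment, each broken into IXGBE_MAX_DATA_PER_TXD-sized chunks, plus three extra slots (two of gap so the tail never touches the head, one for the context descriptor). A standalone sketch of that arithmetic; the 16 KiB chunk size is an assumption standing in for the driver's IXGBE_MAX_DATA_PER_TXD:

#include <stdio.h>

/* Assumed chunk size standing in for IXGBE_MAX_DATA_PER_TXD (16 KiB here). */
#define MAX_DATA_PER_TXD	(1 << 14)

/* Descriptors needed for a buffer of 'len' bytes, mirroring TXD_USE_COUNT. */
static unsigned int txd_use_count(unsigned int len)
{
	return (len + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD;
}

int main(void)
{
	unsigned int headlen = 1514, frag = 32768;
	unsigned int count = txd_use_count(headlen) + txd_use_count(frag);

	/* + 2 descriptors of gap, + 1 context descriptor */
	printf("descriptors reserved: %u\n", count + 3);	/* prints 6 */
	return 0;
}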
2885 
2886 /**
2887  * ixgbevf_set_mac - Change the Ethernet Address of the NIC
2888  * @netdev: network interface device structure
2889  * @p: pointer to an address structure
2890  *
2891  * Returns 0 on success, negative on failure
2892  **/
2893 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2894 {
2895  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2896  struct ixgbe_hw *hw = &adapter->hw;
2897  struct sockaddr *addr = p;
2898 
2899  if (!is_valid_ether_addr(addr->sa_data))
2900  return -EADDRNOTAVAIL;
2901 
2902  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2903  memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2904 
2905  spin_lock(&adapter->mbx_lock);
2906 
2907  if (hw->mac.ops.set_rar)
2908  hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2909 
2910  spin_unlock(&adapter->mbx_lock);
2911 
2912  return 0;
2913 }
2914 
2915 /**
2916  * ixgbevf_change_mtu - Change the Maximum Transfer Unit
2917  * @netdev: network interface device structure
2918  * @new_mtu: new value for maximum frame size
2919  *
2920  * Returns 0 on success, negative on failure
2921  **/
2922 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2923 {
2924  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2925  int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2926  int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2927 
2928  if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2929  max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
2930 
2931  /* MTU < 68 is an error and causes problems on some kernels */
2932  if ((new_mtu < 68) || (max_frame > max_possible_frame))
2933  return -EINVAL;
2934 
2935  hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
2936  netdev->mtu, new_mtu);
2937  /* must set new MTU before calling down or up */
2938  netdev->mtu = new_mtu;
2939 
2940  if (netif_running(netdev))
2941  ixgbevf_reinit_locked(adapter);
2942 
2943  return 0;
2944 }
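The MTU check above turns the requested MTU into an on-wire frame size by adding the Ethernet header and FCS, then compares it against the largest frame the MAC supports. A short worked example, with ETH_HLEN and ETH_FCS_LEN restated as the usual 14 and 4 bytes:

#include <stdio.h>

#define ETH_HLEN	14	/* destination + source MAC + EtherType */
#define ETH_FCS_LEN	4	/* trailing CRC */

int main(void)
{
	int new_mtu = 1500;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* a 1500-byte MTU becomes a 1518-byte wire frame; a VLAN tag adds 4 more */
	printf("max_frame = %d\n", max_frame);
	return 0;
}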
2945 
2946 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
2947 {
2948  struct net_device *netdev = pci_get_drvdata(pdev);
2949  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2950 #ifdef CONFIG_PM
2951  int retval = 0;
2952 #endif
2953 
2954  netif_device_detach(netdev);
2955 
2956  if (netif_running(netdev)) {
2957  rtnl_lock();
2958  ixgbevf_down(adapter);
2959  ixgbevf_free_irq(adapter);
2960  ixgbevf_free_all_tx_resources(adapter);
2961  ixgbevf_free_all_rx_resources(adapter);
2962  rtnl_unlock();
2963  }
2964 
2965  ixgbevf_clear_interrupt_scheme(adapter);
2966 
2967 #ifdef CONFIG_PM
2968  retval = pci_save_state(pdev);
2969  if (retval)
2970  return retval;
2971 
2972 #endif
2973  pci_disable_device(pdev);
2974 
2975  return 0;
2976 }
2977 
2978 #ifdef CONFIG_PM
2979 static int ixgbevf_resume(struct pci_dev *pdev)
2980 {
2981  struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev);
2982  struct net_device *netdev = adapter->netdev;
2983  u32 err;
2984 
2985  pci_set_power_state(pdev, PCI_D0);
2986  pci_restore_state(pdev);
2987  /*
2988  * pci_restore_state clears dev->state_saved so call
2989  * pci_save_state to restore it.
2990  */
2991  pci_save_state(pdev);
2992 
2993  err = pci_enable_device_mem(pdev);
2994  if (err) {
2995  dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2996  return err;
2997  }
2998  pci_set_master(pdev);
2999 
3000  rtnl_lock();
3001  err = ixgbevf_init_interrupt_scheme(adapter);
3002  rtnl_unlock();
3003  if (err) {
3004  dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3005  return err;
3006  }
3007 
3008  ixgbevf_reset(adapter);
3009 
3010  if (netif_running(netdev)) {
3011  err = ixgbevf_open(netdev);
3012  if (err)
3013  return err;
3014  }
3015 
3016  netif_device_attach(netdev);
3017 
3018  return err;
3019 }
3020 
3021 #endif /* CONFIG_PM */
3022 static void ixgbevf_shutdown(struct pci_dev *pdev)
3023 {
3024  ixgbevf_suspend(pdev, PMSG_SUSPEND);
3025 }
3026 
3027 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3028  struct rtnl_link_stats64 *stats)
3029 {
3030  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3031  unsigned int start;
3032  u64 bytes, packets;
3033  const struct ixgbevf_ring *ring;
3034  int i;
3035 
3036  ixgbevf_update_stats(adapter);
3037 
3038  stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3039 
3040  for (i = 0; i < adapter->num_rx_queues; i++) {
3041  ring = &adapter->rx_ring[i];
3042  do {
3043  start = u64_stats_fetch_begin_bh(&ring->syncp);
3044  bytes = ring->total_bytes;
3045  packets = ring->total_packets;
3046  } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3047  stats->rx_bytes += bytes;
3048  stats->rx_packets += packets;
3049  }
3050 
3051  for (i = 0; i < adapter->num_tx_queues; i++) {
3052  ring = &adapter->tx_ring[i];
3053  do {
3054  start = u64_stats_fetch_begin_bh(&ring->syncp);
3055  bytes = ring->total_bytes;
3056  packets = ring->total_packets;
3057  } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3058  stats->tx_bytes += bytes;
3059  stats->tx_packets += packets;
3060  }
3061 
3062  return stats;
3063 }
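The per-ring loops above rely on the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() sequence-counter pattern so that 64-bit byte and packet totals are read consistently even on 32-bit machines. A toy single-threaded sketch of the reader side (no barriers, illustrative names only; real code also treats an odd sequence as a write in progress):

#include <stdint.h>
#include <stdio.h>

/* Toy snapshot mirroring the fetch_begin/fetch_retry pattern: copy the
 * counters, then retry if the sequence moved while we were copying. */
struct toy_stats {
	unsigned int seq;
	uint64_t bytes, packets;
};

static void snapshot(const struct toy_stats *s, uint64_t *b, uint64_t *p)
{
	unsigned int start;

	do {
		start = s->seq;		/* fetch_begin */
		*b = s->bytes;
		*p = s->packets;
	} while (s->seq != start);	/* fetch_retry */
}

int main(void)
{
	struct toy_stats s = { .seq = 2, .bytes = 123456, .packets = 789 };
	uint64_t b, p;

	snapshot(&s, &b, &p);
	printf("%llu bytes, %llu packets\n",
	       (unsigned long long)b, (unsigned long long)p);
	return 0;
}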
3064 
3065 static const struct net_device_ops ixgbevf_netdev_ops = {
3066  .ndo_open = ixgbevf_open,
3067  .ndo_stop = ixgbevf_close,
3068  .ndo_start_xmit = ixgbevf_xmit_frame,
3069  .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3070  .ndo_get_stats64 = ixgbevf_get_stats,
3071  .ndo_validate_addr = eth_validate_addr,
3072  .ndo_set_mac_address = ixgbevf_set_mac,
3073  .ndo_change_mtu = ixgbevf_change_mtu,
3074  .ndo_tx_timeout = ixgbevf_tx_timeout,
3075  .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3076  .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3077 };
3078 
3079 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3080 {
3081  dev->netdev_ops = &ixgbevf_netdev_ops;
3082  ixgbevf_set_ethtool_ops(dev);
3083  dev->watchdog_timeo = 5 * HZ;
3084 }
3085 
3086 /**
3087  * ixgbevf_probe - Device Initialization Routine
3088  * @pdev: PCI device information struct
3089  * @ent: entry in ixgbevf_pci_tbl
3090  *
3091  * Returns 0 on success, negative on failure
3092  *
3093  * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3094  * The OS initialization, configuring of the adapter private structure,
3095  * and a hardware reset occur.
3096  **/
3097 static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3098  const struct pci_device_id *ent)
3099 {
3100  struct net_device *netdev;
3101  struct ixgbevf_adapter *adapter = NULL;
3102  struct ixgbe_hw *hw = NULL;
3103  const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3104  static int cards_found;
3105  int err, pci_using_dac;
3106 
3107  err = pci_enable_device(pdev);
3108  if (err)
3109  return err;
3110 
3111  if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3112  !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3113  pci_using_dac = 1;
3114  } else {
3115  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3116  if (err) {
3117  err = dma_set_coherent_mask(&pdev->dev,
3118  DMA_BIT_MASK(32));
3119  if (err) {
3120  dev_err(&pdev->dev, "No usable DMA "
3121  "configuration, aborting\n");
3122  goto err_dma;
3123  }
3124  }
3125  pci_using_dac = 0;
3126  }
3127 
3128  err = pci_request_regions(pdev, ixgbevf_driver_name);
3129  if (err) {
3130  dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3131  goto err_pci_reg;
3132  }
3133 
3134  pci_set_master(pdev);
3135 
3136  netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3137  MAX_TX_QUEUES);
3138  if (!netdev) {
3139  err = -ENOMEM;
3140  goto err_alloc_etherdev;
3141  }
3142 
3143  SET_NETDEV_DEV(netdev, &pdev->dev);
3144 
3145  pci_set_drvdata(pdev, netdev);
3146  adapter = netdev_priv(netdev);
3147 
3148  adapter->netdev = netdev;
3149  adapter->pdev = pdev;
3150  hw = &adapter->hw;
3151  hw->back = adapter;
3152  adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3153 
3154  /*
3155  * call save state here in standalone driver because it relies on
3156  * adapter struct to exist, and needs to call netdev_priv
3157  */
3158  pci_save_state(pdev);
3159 
3160  hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3161  pci_resource_len(pdev, 0));
3162  if (!hw->hw_addr) {
3163  err = -EIO;
3164  goto err_ioremap;
3165  }
3166 
3167  ixgbevf_assign_netdev_ops(netdev);
3168 
3169  adapter->bd_number = cards_found;
3170 
3171  /* Setup hw api */
3172  memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3173  hw->mac.type = ii->mac;
3174 
3175  memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3176  sizeof(struct ixgbe_mbx_operations));
3177 
3178  /* setup the private structure */
3179  err = ixgbevf_sw_init(adapter);
3180  if (err)
3181  goto err_sw_init;
3182 
3183  /* The HW MAC address was set and/or determined in sw_init */
3184  memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3185 
3186  if (!is_valid_ether_addr(netdev->dev_addr)) {
3187  pr_err("invalid MAC address\n");
3188  err = -EIO;
3189  goto err_sw_init;
3190  }
3191 
3192  netdev->hw_features = NETIF_F_SG |
3193  NETIF_F_IP_CSUM |
3194  NETIF_F_IPV6_CSUM |
3195  NETIF_F_TSO |
3196  NETIF_F_TSO6 |
3197  NETIF_F_RXCSUM;
3198 
3199  netdev->features = netdev->hw_features |
3200  NETIF_F_HW_VLAN_TX |
3201  NETIF_F_HW_VLAN_RX |
3202  NETIF_F_HW_VLAN_FILTER;
3203 
3204  netdev->vlan_features |= NETIF_F_TSO;
3205  netdev->vlan_features |= NETIF_F_TSO6;
3206  netdev->vlan_features |= NETIF_F_IP_CSUM;
3207  netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3208  netdev->vlan_features |= NETIF_F_SG;
3209 
3210  if (pci_using_dac)
3211  netdev->features |= NETIF_F_HIGHDMA;
3212 
3213  netdev->priv_flags |= IFF_UNICAST_FLT;
3214 
3215  init_timer(&adapter->watchdog_timer);
3216  adapter->watchdog_timer.function = ixgbevf_watchdog;
3217  adapter->watchdog_timer.data = (unsigned long)adapter;
3218 
3219  INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3220  INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3221 
3222  err = ixgbevf_init_interrupt_scheme(adapter);
3223  if (err)
3224  goto err_sw_init;
3225 
3226  /* pick up the PCI bus settings for reporting later */
3227  if (hw->mac.ops.get_bus_info)
3228  hw->mac.ops.get_bus_info(hw);
3229 
3230  strcpy(netdev->name, "eth%d");
3231 
3232  err = register_netdev(netdev);
3233  if (err)
3234  goto err_register;
3235 
3236  netif_carrier_off(netdev);
3237 
3238  ixgbevf_init_last_counter_stats(adapter);
3239 
3240  /* print the MAC address */
3241  hw_dbg(hw, "%pM\n", netdev->dev_addr);
3242 
3243  hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3244 
3245  hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3246  cards_found++;
3247  return 0;
3248 
3249 err_register:
3250  ixgbevf_clear_interrupt_scheme(adapter);
3251 err_sw_init:
3252  ixgbevf_reset_interrupt_capability(adapter);
3253  iounmap(hw->hw_addr);
3254 err_ioremap:
3255  free_netdev(netdev);
3256 err_alloc_etherdev:
3257  pci_release_regions(pdev);
3258 err_pci_reg:
3259 err_dma:
3260  pci_disable_device(pdev);
3261  return err;
3262 }
3263 
3264 /**
3265  * ixgbevf_remove - Device Removal Routine
3266  * @pdev: PCI device information struct
3267  *
3268  * ixgbevf_remove is called by the PCI subsystem to alert the driver
3269  * that it should release a PCI device.  This could be caused by a
3270  * Hot-Plug event, or because the driver is going to be removed from
3271  * memory.
3272  **/
3273 static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3274 {
3275  struct net_device *netdev = pci_get_drvdata(pdev);
3276  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3277 
3278  set_bit(__IXGBEVF_DOWN, &adapter->state);
3279 
3280  del_timer_sync(&adapter->watchdog_timer);
3281 
3282  cancel_work_sync(&adapter->reset_task);
3283  cancel_work_sync(&adapter->watchdog_task);
3284 
3285  if (netdev->reg_state == NETREG_REGISTERED)
3286  unregister_netdev(netdev);
3287 
3288  ixgbevf_clear_interrupt_scheme(adapter);
3289  ixgbevf_reset_interrupt_capability(adapter);
3290 
3291  iounmap(adapter->hw.hw_addr);
3292  pci_release_regions(pdev);
3293 
3294  hw_dbg(&adapter->hw, "Remove complete\n");
3295 
3296  kfree(adapter->tx_ring);
3297  kfree(adapter->rx_ring);
3298 
3299  free_netdev(netdev);
3300 
3301  pci_disable_device(pdev);
3302 }
3303 
3304 /**
3305  * ixgbevf_io_error_detected - called when PCI error is detected
3306  * @pdev: Pointer to PCI device
3307  * @state: The current pci connection state
3308  *
3309  * This function is called after a PCI bus error affecting
3310  * this device has been detected.
3311  */
3312 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3313  pci_channel_state_t state)
3314 {
3315  struct net_device *netdev = pci_get_drvdata(pdev);
3316  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3317 
3318  netif_device_detach(netdev);
3319 
3320  if (state == pci_channel_io_perm_failure)
3321  return PCI_ERS_RESULT_DISCONNECT;
3322 
3323  if (netif_running(netdev))
3324  ixgbevf_down(adapter);
3325 
3326  pci_disable_device(pdev);
3327 
3328  /* Request a slot reset. */
3329  return PCI_ERS_RESULT_NEED_RESET;
3330 }
3331 
3332 /**
3333  * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3334  * @pdev: Pointer to PCI device
3335  *
3336  * Restart the card from scratch, as if from a cold-boot.  Implementation
3337  * resembles the first-half of the ixgbevf_resume routine.
3338  */
3339 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3340 {
3341  struct net_device *netdev = pci_get_drvdata(pdev);
3342  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3343 
3344  if (pci_enable_device_mem(pdev)) {
3345  dev_err(&pdev->dev,
3346  "Cannot re-enable PCI device after reset.\n");
3347  return PCI_ERS_RESULT_DISCONNECT;
3348  }
3349 
3350  pci_set_master(pdev);
3351 
3352  ixgbevf_reset(adapter);
3353 
3354  return PCI_ERS_RESULT_RECOVERED;
3355 }
3356 
3357 /**
3358  * ixgbevf_io_resume - called when traffic can start flowing again.
3359  * @pdev: Pointer to PCI device
3360  *
3361  * This callback is called when the error recovery driver tells us that
3362  * it is OK to resume normal operation.  Implementation resembles the
3363  * second-half of the ixgbevf_resume routine.
3364  */
3365 static void ixgbevf_io_resume(struct pci_dev *pdev)
3366 {
3367  struct net_device *netdev = pci_get_drvdata(pdev);
3368  struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3369 
3370  if (netif_running(netdev))
3371  ixgbevf_up(adapter);
3372 
3373  netif_device_attach(netdev);
3374 }
3375 
3376 /* PCI Error Recovery (ERS) */
3377 static const struct pci_error_handlers ixgbevf_err_handler = {
3378  .error_detected = ixgbevf_io_error_detected,
3379  .slot_reset = ixgbevf_io_slot_reset,
3380  .resume = ixgbevf_io_resume,
3381 };
3382 
3383 static struct pci_driver ixgbevf_driver = {
3384  .name = ixgbevf_driver_name,
3385  .id_table = ixgbevf_pci_tbl,
3386  .probe = ixgbevf_probe,
3387  .remove = __devexit_p(ixgbevf_remove),
3388 #ifdef CONFIG_PM
3389  /* Power Management Hooks */
3390  .suspend = ixgbevf_suspend,
3391  .resume = ixgbevf_resume,
3392 #endif
3393  .shutdown = ixgbevf_shutdown,
3394  .err_handler = &ixgbevf_err_handler
3395 };
3396 
3397 /**
3398  * ixgbevf_init_module - Driver Registration Routine
3399  *
3400  * ixgbevf_init_module is the first routine called when the driver is
3401  * loaded.  All it does is register with the PCI subsystem.
3402  **/
3403 static int __init ixgbevf_init_module(void)
3404 {
3405  int ret;
3406  pr_info("%s - version %s\n", ixgbevf_driver_string,
3407  ixgbevf_driver_version);
3408 
3409  pr_info("%s\n", ixgbevf_copyright);
3410 
3411  ret = pci_register_driver(&ixgbevf_driver);
3412  return ret;
3413 }
3414 
3415 module_init(ixgbevf_init_module);
3416 
3417 /**
3418  * ixgbevf_exit_module - Driver Exit Cleanup Routine
3419  *
3420  * ixgbevf_exit_module is called just before the driver is removed
3421  * from memory.
3422  **/
3423 static void __exit ixgbevf_exit_module(void)
3424 {
3425  pci_unregister_driver(&ixgbevf_driver);
3426 }
3427 
3428 #ifdef DEBUG
3429 
3430 /**
3431  * ixgbevf_get_hw_dev_name - return device name string
3432  **/
3433 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3434 {
3435  struct ixgbevf_adapter *adapter = hw->back;
3436  return adapter->netdev->name;
3437 }
3438 
3439 #endif
3440 module_exit(ixgbevf_exit_module);
3441 
3442 /* ixgbevf_main.c */