ixgb_main.c
1 /*******************************************************************************
2 
3  Intel PRO/10GbE Linux driver
4  Copyright(c) 1999 - 2008 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  Linux NICS <[email protected]>
24  e1000-devel Mailing List <[email protected]>
25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include <linux/prefetch.h>
32 #include "ixgb.h"
33 
34 char ixgb_driver_name[] = "ixgb";
35 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
36 
37 #define DRIVERNAPI "-NAPI"
38 #define DRV_VERSION "1.0.135-k2" DRIVERNAPI
39 const char ixgb_driver_version[] = DRV_VERSION;
40 static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
41 
42 #define IXGB_CB_LENGTH 256
43 static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
44 module_param(copybreak, uint, 0644);
45 MODULE_PARM_DESC(copybreak,
46  "Maximum size of packet that is copied to a new buffer on receive");
47 
48 /* ixgb_pci_tbl - PCI Device ID Table
49  *
50  * Wildcard entries (PCI_ANY_ID) should come last
51  * Last entry must be all 0s
52  *
53  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
54  * Class, Class Mask, private data (not used) }
55  */
56 static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
57  {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
58  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
59  {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
60  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
61  {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
62  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
63  {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
64  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
65 
66  /* required last entry */
67  {0,}
68 };
69 
70 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
71 
72 /* Local Function Prototypes */
73 static int ixgb_init_module(void);
74 static void ixgb_exit_module(void);
75 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
76 static void __devexit ixgb_remove(struct pci_dev *pdev);
77 static int ixgb_sw_init(struct ixgb_adapter *adapter);
78 static int ixgb_open(struct net_device *netdev);
79 static int ixgb_close(struct net_device *netdev);
80 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
81 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
82 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
83 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
84 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
85 static void ixgb_set_multi(struct net_device *netdev);
86 static void ixgb_watchdog(unsigned long data);
87 static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
88  struct net_device *netdev);
89 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
90 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
91 static int ixgb_set_mac(struct net_device *netdev, void *p);
92 static irqreturn_t ixgb_intr(int irq, void *data);
93 static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
94 
95 static int ixgb_clean(struct napi_struct *, int);
96 static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
97 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
98 
99 static void ixgb_tx_timeout(struct net_device *dev);
100 static void ixgb_tx_timeout_task(struct work_struct *work);
101 
102 static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
103 static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
104 static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
105 static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
106 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
107 
108 #ifdef CONFIG_NET_POLL_CONTROLLER
109 /* for netdump / net console */
110 static void ixgb_netpoll(struct net_device *dev);
111 #endif
112 
113 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
114  enum pci_channel_state state);
115 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
116 static void ixgb_io_resume (struct pci_dev *pdev);
117 
118 static const struct pci_error_handlers ixgb_err_handler = {
119  .error_detected = ixgb_io_error_detected,
120  .slot_reset = ixgb_io_slot_reset,
121  .resume = ixgb_io_resume,
122 };
123 
124 static struct pci_driver ixgb_driver = {
125  .name = ixgb_driver_name,
126  .id_table = ixgb_pci_tbl,
127  .probe = ixgb_probe,
128  .remove = __devexit_p(ixgb_remove),
129  .err_handler = &ixgb_err_handler
130 };
131 
132 MODULE_AUTHOR("Intel Corporation, <[email protected]>");
133 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
134 MODULE_LICENSE("GPL");
135 MODULE_VERSION(DRV_VERSION);
136 
137 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
138 static int debug = -1;
139 module_param(debug, int, 0);
140 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
141 
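/**
 * ixgb_init_module - Driver Registration Routine
 *
 * Called when the module is loaded; all it does is register
 * the driver with the PCI subsystem.
 **/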
149 static int __init
150 ixgb_init_module(void)
151 {
152  pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
153  pr_info("%s\n", ixgb_copyright);
154 
155  return pci_register_driver(&ixgb_driver);
156 }
157 
158 module_init(ixgb_init_module);
159 
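/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * Called just before the driver is removed from memory.
 **/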
167 static void __exit
168 ixgb_exit_module(void)
169 {
170  pci_unregister_driver(&ixgb_driver);
171 }
172 
173 module_exit(ixgb_exit_module);
174 
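/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/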
180 static void
181 ixgb_irq_disable(struct ixgb_adapter *adapter)
182 {
183  IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
184  IXGB_WRITE_FLUSH(&adapter->hw);
185  synchronize_irq(adapter->pdev->irq);
186 }
187 
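/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/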
193 static void
194 ixgb_irq_enable(struct ixgb_adapter *adapter)
195 {
196  u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
197  IXGB_INT_TXDW | IXGB_INT_LSC;
198  if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
199  val |= IXGB_INT_GPI0;
200  IXGB_WRITE_REG(&adapter->hw, IMS, val);
201  IXGB_WRITE_FLUSH(&adapter->hw);
202 }
203 
204 int
205 ixgb_up(struct ixgb_adapter *adapter)
206 {
207  struct net_device *netdev = adapter->netdev;
208  int err, irq_flags = IRQF_SHARED;
209  int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
210  struct ixgb_hw *hw = &adapter->hw;
211 
212  /* hardware has been reset, we need to reload some things */
213 
214  ixgb_rar_set(hw, netdev->dev_addr, 0);
215  ixgb_set_multi(netdev);
216 
217  ixgb_restore_vlan(adapter);
218 
219  ixgb_configure_tx(adapter);
220  ixgb_setup_rctl(adapter);
221  ixgb_configure_rx(adapter);
222  ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
223 
224  /* disable interrupts and get the hardware into a known state */
225  IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
226 
227  /* only enable MSI if bus is in PCI-X mode */
228  if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
229  err = pci_enable_msi(adapter->pdev);
230  if (!err) {
231  adapter->have_msi = true;
232  irq_flags = 0;
233  }
234  /* proceed to try to request regular interrupt */
235  }
236 
237  err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
238  netdev->name, netdev);
239  if (err) {
240  if (adapter->have_msi)
241  pci_disable_msi(adapter->pdev);
242  netif_err(adapter, probe, adapter->netdev,
243  "Unable to allocate interrupt Error: %d\n", err);
244  return err;
245  }
246 
247  if ((hw->max_frame_size != max_frame) ||
248  (hw->max_frame_size !=
249  (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
250 
251  hw->max_frame_size = max_frame;
252 
253  IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
254 
255  if (hw->max_frame_size >
256  IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
257  u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
258 
259  if (!(ctrl0 & IXGB_CTRL0_JFE)) {
260  ctrl0 |= IXGB_CTRL0_JFE;
261  IXGB_WRITE_REG(hw, CTRL0, ctrl0);
262  }
263  }
264  }
265 
266  clear_bit(__IXGB_DOWN, &adapter->flags);
267 
268  napi_enable(&adapter->napi);
269  ixgb_irq_enable(adapter);
270 
271  netif_wake_queue(netdev);
272 
273  mod_timer(&adapter->watchdog_timer, jiffies);
274 
275  return 0;
276 }
277 
278 void
279 ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
280 {
281  struct net_device *netdev = adapter->netdev;
282 
283  /* prevent the interrupt handler from restarting watchdog */
284  set_bit(__IXGB_DOWN, &adapter->flags);
285 
286  napi_disable(&adapter->napi);
287  /* waiting for NAPI to complete can re-enable interrupts */
288  ixgb_irq_disable(adapter);
289  free_irq(adapter->pdev->irq, netdev);
290 
291  if (adapter->have_msi)
292  pci_disable_msi(adapter->pdev);
293 
294  if (kill_watchdog)
295  del_timer_sync(&adapter->watchdog_timer);
296 
297  adapter->link_speed = 0;
298  adapter->link_duplex = 0;
299  netif_carrier_off(netdev);
300  netif_stop_queue(netdev);
301 
302  ixgb_reset(adapter);
303  ixgb_clean_tx_ring(adapter);
304  ixgb_clean_rx_ring(adapter);
305 }
306 
307 void
308 ixgb_reset(struct ixgb_adapter *adapter)
309 {
310  struct ixgb_hw *hw = &adapter->hw;
311 
312  ixgb_adapter_stop(hw);
313  if (!ixgb_init_hw(hw))
314  netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
315 
316  /* restore frame size information */
317  IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
318  if (hw->max_frame_size >
319  IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
320  u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
321  if (!(ctrl0 & IXGB_CTRL0_JFE)) {
322  ctrl0 |= IXGB_CTRL0_JFE;
323  IXGB_WRITE_REG(hw, CTRL0, ctrl0);
324  }
325  }
326 }
327 
328 static netdev_features_t
329 ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
330 {
331  /*
332  * Tx VLAN insertion does not work per HW design when Rx stripping is
333  * disabled.
334  */
335  if (!(features & NETIF_F_HW_VLAN_RX))
336  features &= ~NETIF_F_HW_VLAN_TX;
337 
338  return features;
339 }
340 
341 static int
342 ixgb_set_features(struct net_device *netdev, netdev_features_t features)
343 {
344  struct ixgb_adapter *adapter = netdev_priv(netdev);
345  netdev_features_t changed = features ^ netdev->features;
346 
347  if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
348  return 0;
349 
350  adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
351 
352  if (netif_running(netdev)) {
353  ixgb_down(adapter, true);
354  ixgb_up(adapter);
355  ixgb_set_speed_duplex(netdev);
356  } else
357  ixgb_reset(adapter);
358 
359  return 0;
360 }
361 
362 
363 static const struct net_device_ops ixgb_netdev_ops = {
364  .ndo_open = ixgb_open,
365  .ndo_stop = ixgb_close,
366  .ndo_start_xmit = ixgb_xmit_frame,
367  .ndo_get_stats = ixgb_get_stats,
368  .ndo_set_rx_mode = ixgb_set_multi,
369  .ndo_validate_addr = eth_validate_addr,
370  .ndo_set_mac_address = ixgb_set_mac,
371  .ndo_change_mtu = ixgb_change_mtu,
372  .ndo_tx_timeout = ixgb_tx_timeout,
373  .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
374  .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
375 #ifdef CONFIG_NET_POLL_CONTROLLER
376  .ndo_poll_controller = ixgb_netpoll,
377 #endif
378  .ndo_fix_features = ixgb_fix_features,
379  .ndo_set_features = ixgb_set_features,
380 };
381 
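/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure. Initializes the adapter
 * private structure, maps the registers, and registers the netdev.
 **/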
394 static int __devinit
395 ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
396 {
397  struct net_device *netdev = NULL;
398  struct ixgb_adapter *adapter;
399  static int cards_found = 0;
400  int pci_using_dac;
401  int i;
402  int err;
403 
404  err = pci_enable_device(pdev);
405  if (err)
406  return err;
407 
408  pci_using_dac = 0;
409  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
410  if (!err) {
411  err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
412  if (!err)
413  pci_using_dac = 1;
414  } else {
415  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
416  if (err) {
417  err = dma_set_coherent_mask(&pdev->dev,
418  DMA_BIT_MASK(32));
419  if (err) {
420  pr_err("No usable DMA configuration, aborting\n");
421  goto err_dma_mask;
422  }
423  }
424  }
425 
426  err = pci_request_regions(pdev, ixgb_driver_name);
427  if (err)
428  goto err_request_regions;
429 
430  pci_set_master(pdev);
431 
432  netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
433  if (!netdev) {
434  err = -ENOMEM;
435  goto err_alloc_etherdev;
436  }
437 
438  SET_NETDEV_DEV(netdev, &pdev->dev);
439 
440  pci_set_drvdata(pdev, netdev);
441  adapter = netdev_priv(netdev);
442  adapter->netdev = netdev;
443  adapter->pdev = pdev;
444  adapter->hw.back = adapter;
445  adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
446 
447  adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
448  if (!adapter->hw.hw_addr) {
449  err = -EIO;
450  goto err_ioremap;
451  }
452 
453  for (i = BAR_1; i <= BAR_5; i++) {
454  if (pci_resource_len(pdev, i) == 0)
455  continue;
456  if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
457  adapter->hw.io_base = pci_resource_start(pdev, i);
458  break;
459  }
460  }
461 
462  netdev->netdev_ops = &ixgb_netdev_ops;
463  ixgb_set_ethtool_ops(netdev);
464  netdev->watchdog_timeo = 5 * HZ;
465  netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
466 
467  strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
468 
469  adapter->bd_number = cards_found;
470  adapter->link_speed = 0;
471  adapter->link_duplex = 0;
472 
473  /* setup the private structure */
474 
475  err = ixgb_sw_init(adapter);
476  if (err)
477  goto err_sw_init;
478 
479  netdev->hw_features = NETIF_F_SG |
480  NETIF_F_TSO |
481  NETIF_F_HW_CSUM |
482  NETIF_F_HW_VLAN_TX |
483  NETIF_F_HW_VLAN_RX;
484  netdev->features = netdev->hw_features |
485  NETIF_F_HW_VLAN_FILTER;
486  netdev->hw_features |= NETIF_F_RXCSUM;
487 
488  if (pci_using_dac) {
489  netdev->features |= NETIF_F_HIGHDMA;
490  netdev->vlan_features |= NETIF_F_HIGHDMA;
491  }
492 
493  /* make sure the EEPROM is good */
494 
495  if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
496  netif_err(adapter, probe, adapter->netdev,
497  "The EEPROM Checksum Is Not Valid\n");
498  err = -EIO;
499  goto err_eeprom;
500  }
501 
502  ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
503  memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
504 
505  if (!is_valid_ether_addr(netdev->perm_addr)) {
506  netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
507  err = -EIO;
508  goto err_eeprom;
509  }
510 
511  adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
512 
513  init_timer(&adapter->watchdog_timer);
514  adapter->watchdog_timer.function = ixgb_watchdog;
515  adapter->watchdog_timer.data = (unsigned long)adapter;
516 
517  INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
518 
519  strcpy(netdev->name, "eth%d");
520  err = register_netdev(netdev);
521  if (err)
522  goto err_register;
523 
524  /* carrier off reporting is important to ethtool even BEFORE open */
525  netif_carrier_off(netdev);
526 
527  netif_info(adapter, probe, adapter->netdev,
528  "Intel(R) PRO/10GbE Network Connection\n");
529  ixgb_check_options(adapter);
530  /* reset the hardware with the new settings */
531 
532  ixgb_reset(adapter);
533 
534  cards_found++;
535  return 0;
536 
537 err_register:
538 err_sw_init:
539 err_eeprom:
540  iounmap(adapter->hw.hw_addr);
541 err_ioremap:
542  free_netdev(netdev);
543 err_alloc_etherdev:
544  pci_release_regions(pdev);
545 err_request_regions:
546 err_dma_mask:
547  pci_disable_device(pdev);
548  return err;
549 }
550 
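/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * Called by the PCI subsystem to alert the driver that it should
 * release the device; undoes everything done in ixgb_probe.
 **/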
561 static void __devexit
562 ixgb_remove(struct pci_dev *pdev)
563 {
564  struct net_device *netdev = pci_get_drvdata(pdev);
565  struct ixgb_adapter *adapter = netdev_priv(netdev);
566 
567  cancel_work_sync(&adapter->tx_timeout_task);
568 
569  unregister_netdev(netdev);
570 
571  iounmap(adapter->hw.hw_addr);
572  pci_release_regions(pdev);
573 
574  free_netdev(netdev);
575  pci_disable_device(pdev);
576 }
577 
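/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 **/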
587 static int __devinit
588 ixgb_sw_init(struct ixgb_adapter *adapter)
589 {
590  struct ixgb_hw *hw = &adapter->hw;
591  struct net_device *netdev = adapter->netdev;
592  struct pci_dev *pdev = adapter->pdev;
593 
594  /* PCI config space info */
595 
596  hw->vendor_id = pdev->vendor;
597  hw->device_id = pdev->device;
598  hw->subsystem_vendor_id = pdev->subsystem_vendor;
599  hw->subsystem_id = pdev->subsystem_device;
600 
601  hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
602  adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
603 
604  if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
605  (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
606  (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
607  (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
608  hw->mac_type = ixgb_82597;
609  else {
610  /* should never have loaded on this device */
611  netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
612  }
613 
614  /* enable flow control to be programmed */
615  hw->fc.send_xon = 1;
616 
617  set_bit(__IXGB_DOWN, &adapter->flags);
618  return 0;
619 }
620 
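/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Allocates transmit and receive resources, then brings the
 * interface up (watchdog timer started, IRQ requested).
 **/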
634 static int
635 ixgb_open(struct net_device *netdev)
636 {
637  struct ixgb_adapter *adapter = netdev_priv(netdev);
638  int err;
639 
640  /* allocate transmit descriptors */
641  err = ixgb_setup_tx_resources(adapter);
642  if (err)
643  goto err_setup_tx;
644 
645  netif_carrier_off(netdev);
646 
647  /* allocate receive descriptors */
648 
649  err = ixgb_setup_rx_resources(adapter);
650  if (err)
651  goto err_setup_rx;
652 
653  err = ixgb_up(adapter);
654  if (err)
655  goto err_up;
656 
657  netif_start_queue(netdev);
658 
659  return 0;
660 
661 err_up:
662  ixgb_free_rx_resources(adapter);
663 err_setup_rx:
664  ixgb_free_tx_resources(adapter);
665 err_setup_tx:
666  ixgb_reset(adapter);
667 
668  return err;
669 }
670 
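/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The inverse of ixgb_open: stops the hardware and frees resources.
 **/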
683 static int
684 ixgb_close(struct net_device *netdev)
685 {
686  struct ixgb_adapter *adapter = netdev_priv(netdev);
687 
688  ixgb_down(adapter, true);
689 
690  ixgb_free_tx_resources(adapter);
691  ixgb_free_rx_resources(adapter);
692 
693  return 0;
694 }
695 
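/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure.
 **/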
703 int
704 ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
705 {
706  struct ixgb_desc_ring *txdr = &adapter->tx_ring;
707  struct pci_dev *pdev = adapter->pdev;
708  int size;
709 
710  size = sizeof(struct ixgb_buffer) * txdr->count;
711  txdr->buffer_info = vzalloc(size);
712  if (!txdr->buffer_info) {
713  netif_err(adapter, probe, adapter->netdev,
714  "Unable to allocate transmit descriptor ring memory\n");
715  return -ENOMEM;
716  }
717 
718  /* round up to nearest 4K */
719 
720  txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
721  txdr->size = ALIGN(txdr->size, 4096);
722 
723  txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
724  GFP_KERNEL);
725  if (!txdr->desc) {
726  vfree(txdr->buffer_info);
727  netif_err(adapter, probe, adapter->netdev,
728  "Unable to allocate transmit descriptor memory\n");
729  return -ENOMEM;
730  }
731  memset(txdr->desc, 0, txdr->size);
732 
733  txdr->next_to_use = 0;
734  txdr->next_to_clean = 0;
735 
736  return 0;
737 }
738 
746 static void
747 ixgb_configure_tx(struct ixgb_adapter *adapter)
748 {
749  u64 tdba = adapter->tx_ring.dma;
750  u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
751  u32 tctl;
752  struct ixgb_hw *hw = &adapter->hw;
753 
754  /* Setup the Base and Length of the Tx Descriptor Ring
755  * tx_ring.dma can be either a 32 or 64 bit value
756  */
757 
758  IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
759  IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
760 
761  IXGB_WRITE_REG(hw, TDLEN, tdlen);
762 
763  /* Setup the HW Tx Head and Tail descriptor pointers */
764 
765  IXGB_WRITE_REG(hw, TDH, 0);
766  IXGB_WRITE_REG(hw, TDT, 0);
767 
768  /* don't set up txdctl, it induces performance problems if configured
769  * incorrectly */
770  /* Set the Tx Interrupt Delay register */
771 
772  IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
773 
774  /* Program the Transmit Control Register */
775 
776  tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
777  IXGB_WRITE_REG(hw, TCTL, tctl);
778 
779  /* Setup Transmit Descriptor Settings for this adapter */
780  adapter->tx_cmd_type =
781  IXGB_TX_DESC_TYPE |
782  (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
783 }
784 
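/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure.
 **/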
792 int
793 ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
794 {
795  struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
796  struct pci_dev *pdev = adapter->pdev;
797  int size;
798 
799  size = sizeof(struct ixgb_buffer) * rxdr->count;
800  rxdr->buffer_info = vzalloc(size);
801  if (!rxdr->buffer_info) {
802  netif_err(adapter, probe, adapter->netdev,
803  "Unable to allocate receive descriptor ring\n");
804  return -ENOMEM;
805  }
806 
807  /* Round up to nearest 4K */
808 
809  rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
810  rxdr->size = ALIGN(rxdr->size, 4096);
811 
812  rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
813  GFP_KERNEL);
814 
815  if (!rxdr->desc) {
816  vfree(rxdr->buffer_info);
817  netif_err(adapter, probe, adapter->netdev,
818  "Unable to allocate receive descriptors\n");
819  return -ENOMEM;
820  }
821  memset(rxdr->desc, 0, rxdr->size);
822 
823  rxdr->next_to_clean = 0;
824  rxdr->next_to_use = 0;
825 
826  return 0;
827 }
828 
834 static void
835 ixgb_setup_rctl(struct ixgb_adapter *adapter)
836 {
837  u32 rctl;
838 
839  rctl = IXGB_READ_REG(&adapter->hw, RCTL);
840 
841  rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
842 
843  rctl |=
844  IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
845  IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
846  (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
847 
848  rctl |= IXGB_RCTL_SECRC;
849 
850  if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
851  rctl |= IXGB_RCTL_BSIZE_2048;
852  else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
853  rctl |= IXGB_RCTL_BSIZE_4096;
854  else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
855  rctl |= IXGB_RCTL_BSIZE_8192;
856  else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
857  rctl |= IXGB_RCTL_BSIZE_16384;
858 
859  IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
860 }
861 
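/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configures the Rx unit of the MAC after a reset.
 **/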
869 static void
870 ixgb_configure_rx(struct ixgb_adapter *adapter)
871 {
872  u64 rdba = adapter->rx_ring.dma;
873  u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
874  struct ixgb_hw *hw = &adapter->hw;
875  u32 rctl;
876  u32 rxcsum;
877 
878  /* make sure receives are disabled while setting up the descriptors */
879 
880  rctl = IXGB_READ_REG(hw, RCTL);
881  IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
882 
883  /* set the Receive Delay Timer Register */
884 
885  IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
886 
887  /* Setup the Base and Length of the Rx Descriptor Ring */
888 
889  IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
890  IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
891 
892  IXGB_WRITE_REG(hw, RDLEN, rdlen);
893 
894  /* Setup the HW Rx Head and Tail Descriptor Pointers */
895  IXGB_WRITE_REG(hw, RDH, 0);
896  IXGB_WRITE_REG(hw, RDT, 0);
897 
898  /* due to the hardware errata with RXDCTL, we are unable to use any of
899  * the performance enhancing features of it without causing other
900  * subtle bugs, some of the bugs could include receive length
901  * corruption at high data rates (WTHRESH > 0) and/or receive
902  * descriptor ring irregularites (particularly in hardware cache) */
903  IXGB_WRITE_REG(hw, RXDCTL, 0);
904 
905  /* Enable Receive Checksum Offload for TCP and UDP */
906  if (adapter->rx_csum) {
907  rxcsum = IXGB_READ_REG(hw, RXCSUM);
908  rxcsum |= IXGB_RXCSUM_TUOFL;
909  IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
910  }
911 
912  /* Enable Receives */
913 
914  IXGB_WRITE_REG(hw, RCTL, rctl);
915 }
916 
924 void
925 ixgb_free_tx_resources(struct ixgb_adapter *adapter)
926 {
927  struct pci_dev *pdev = adapter->pdev;
928 
929  ixgb_clean_tx_ring(adapter);
930 
931  vfree(adapter->tx_ring.buffer_info);
932  adapter->tx_ring.buffer_info = NULL;
933 
934  dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
935  adapter->tx_ring.desc, adapter->tx_ring.dma);
936 
937  adapter->tx_ring.desc = NULL;
938 }
939 
940 static void
941 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
942  struct ixgb_buffer *buffer_info)
943 {
944  if (buffer_info->dma) {
945  if (buffer_info->mapped_as_page)
946  dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
947  buffer_info->length, DMA_TO_DEVICE);
948  else
949  dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
950  buffer_info->length, DMA_TO_DEVICE);
951  buffer_info->dma = 0;
952  }
953 
954  if (buffer_info->skb) {
955  dev_kfree_skb_any(buffer_info->skb);
956  buffer_info->skb = NULL;
957  }
958  buffer_info->time_stamp = 0;
959  /* these fields must always be initialized in tx
960  * buffer_info->length = 0;
961  * buffer_info->next_to_watch = 0; */
962 }
963 
969 static void
970 ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
971 {
972  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
973  struct ixgb_buffer *buffer_info;
974  unsigned long size;
975  unsigned int i;
976 
977  /* Free all the Tx ring sk_buffs */
978 
979  for (i = 0; i < tx_ring->count; i++) {
980  buffer_info = &tx_ring->buffer_info[i];
981  ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
982  }
983 
984  size = sizeof(struct ixgb_buffer) * tx_ring->count;
985  memset(tx_ring->buffer_info, 0, size);
986 
987  /* Zero out the descriptor ring */
988 
989  memset(tx_ring->desc, 0, tx_ring->size);
990 
991  tx_ring->next_to_use = 0;
992  tx_ring->next_to_clean = 0;
993 
994  IXGB_WRITE_REG(&adapter->hw, TDH, 0);
995  IXGB_WRITE_REG(&adapter->hw, TDT, 0);
996 }
997 
1005 void
1006 ixgb_free_rx_resources(struct ixgb_adapter *adapter)
1007 {
1008  struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1009  struct pci_dev *pdev = adapter->pdev;
1010 
1011  ixgb_clean_rx_ring(adapter);
1012 
1013  vfree(rx_ring->buffer_info);
1014  rx_ring->buffer_info = NULL;
1015 
1016  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1017  rx_ring->dma);
1018 
1019  rx_ring->desc = NULL;
1020 }
1021 
1027 static void
1028 ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1029 {
1030  struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1031  struct ixgb_buffer *buffer_info;
1032  struct pci_dev *pdev = adapter->pdev;
1033  unsigned long size;
1034  unsigned int i;
1035 
1036  /* Free all the Rx ring sk_buffs */
1037 
1038  for (i = 0; i < rx_ring->count; i++) {
1039  buffer_info = &rx_ring->buffer_info[i];
1040  if (buffer_info->dma) {
1041  dma_unmap_single(&pdev->dev,
1042  buffer_info->dma,
1043  buffer_info->length,
1044  DMA_FROM_DEVICE);
1045  buffer_info->dma = 0;
1046  buffer_info->length = 0;
1047  }
1048 
1049  if (buffer_info->skb) {
1050  dev_kfree_skb(buffer_info->skb);
1051  buffer_info->skb = NULL;
1052  }
1053  }
1054 
1055  size = sizeof(struct ixgb_buffer) * rx_ring->count;
1056  memset(rx_ring->buffer_info, 0, size);
1057 
1058  /* Zero out the descriptor ring */
1059 
1060  memset(rx_ring->desc, 0, rx_ring->size);
1061 
1062  rx_ring->next_to_clean = 0;
1063  rx_ring->next_to_use = 0;
1064 
1065  IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1066  IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1067 }
1068 
1077 static int
1078 ixgb_set_mac(struct net_device *netdev, void *p)
1079 {
1080  struct ixgb_adapter *adapter = netdev_priv(netdev);
1081  struct sockaddr *addr = p;
1082 
1083  if (!is_valid_ether_addr(addr->sa_data))
1084  return -EADDRNOTAVAIL;
1085 
1086  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1087 
1088  ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1089 
1090  return 0;
1091 }
1092 
1103 static void
1104 ixgb_set_multi(struct net_device *netdev)
1105 {
1106  struct ixgb_adapter *adapter = netdev_priv(netdev);
1107  struct ixgb_hw *hw = &adapter->hw;
1108  struct netdev_hw_addr *ha;
1109  u32 rctl;
1110 
1111  /* Check for Promiscuous and All Multicast modes */
1112 
1113  rctl = IXGB_READ_REG(hw, RCTL);
1114 
1115  if (netdev->flags & IFF_PROMISC) {
1116  rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1117  /* disable VLAN filtering */
1118  rctl &= ~IXGB_RCTL_CFIEN;
1119  rctl &= ~IXGB_RCTL_VFE;
1120  } else {
1121  if (netdev->flags & IFF_ALLMULTI) {
1122  rctl |= IXGB_RCTL_MPE;
1123  rctl &= ~IXGB_RCTL_UPE;
1124  } else {
1125  rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1126  }
1127  /* enable VLAN filtering */
1128  rctl |= IXGB_RCTL_VFE;
1129  rctl &= ~IXGB_RCTL_CFIEN;
1130  }
1131 
1132  if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1133  rctl |= IXGB_RCTL_MPE;
1134  IXGB_WRITE_REG(hw, RCTL, rctl);
1135  } else {
1136  u8 *mta = kmalloc(IXGB_MAX_NUM_MULTICAST_ADDRESSES *
1137  ETH_ALEN, GFP_ATOMIC);
1138  u8 *addr;
1139  if (!mta)
1140  goto alloc_failed;
1141 
1142  IXGB_WRITE_REG(hw, RCTL, rctl);
1143 
1144  addr = mta;
1145  netdev_for_each_mc_addr(ha, netdev) {
1146  memcpy(addr, ha->addr, ETH_ALEN);
1147  addr += ETH_ALEN;
1148  }
1149 
1150  ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1151  kfree(mta);
1152  }
1153 
1154 alloc_failed:
1155  if (netdev->features & NETIF_F_HW_VLAN_RX)
1156  ixgb_vlan_strip_enable(adapter);
1157  else
1158  ixgb_vlan_strip_disable(adapter);
1159 
1160 }
1161 
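/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to the adapter cast into an unsigned long
 *
 * Runs every couple of seconds to monitor link state and detect
 * a hung transmit unit.
 **/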
1167 static void
1168 ixgb_watchdog(unsigned long data)
1169 {
1170  struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1171  struct net_device *netdev = adapter->netdev;
1172  struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1173 
1174  ixgb_check_for_link(&adapter->hw);
1175 
1176  if (ixgb_check_for_bad_link(&adapter->hw)) {
1177  /* force the reset path */
1178  netif_stop_queue(netdev);
1179  }
1180 
1181  if (adapter->hw.link_up) {
1182  if (!netif_carrier_ok(netdev)) {
1183  netdev_info(netdev,
1184  "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1185  (adapter->hw.fc.type == ixgb_fc_full) ?
1186  "RX/TX" :
1187  (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1188  "RX" :
1189  (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1190  "TX" : "None");
1191  adapter->link_speed = 10000;
1192  adapter->link_duplex = FULL_DUPLEX;
1193  netif_carrier_on(netdev);
1194  }
1195  } else {
1196  if (netif_carrier_ok(netdev)) {
1197  adapter->link_speed = 0;
1198  adapter->link_duplex = 0;
1199  netdev_info(netdev, "NIC Link is Down\n");
1200  netif_carrier_off(netdev);
1201  }
1202  }
1203 
1204  ixgb_update_stats(adapter);
1205 
1206  if (!netif_carrier_ok(netdev)) {
1207  if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1208  /* We've lost link, so the controller stops DMA,
1209  * but we've got queued Tx work that's never going
1210  * to get done, so reset controller to flush Tx.
1211  * (Do the reset outside of interrupt context). */
1212  schedule_work(&adapter->tx_timeout_task);
1213  /* return immediately since reset is imminent */
1214  return;
1215  }
1216  }
1217 
1218  /* Force detection of hung controller every watchdog period */
1219  adapter->detect_tx_hung = true;
1220 
1221  /* generate an interrupt to force clean up of any stragglers */
1222  IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1223 
1224  /* Reset the timer */
1225  mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1226 }
1227 
1228 #define IXGB_TX_FLAGS_CSUM 0x00000001
1229 #define IXGB_TX_FLAGS_VLAN 0x00000002
1230 #define IXGB_TX_FLAGS_TSO 0x00000004
1231 
1232 static int
1233 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1234 {
1235  struct ixgb_context_desc *context_desc;
1236  unsigned int i;
1237  u8 ipcss, ipcso, tucss, tucso, hdr_len;
1238  u16 ipcse, tucse, mss;
1239  int err;
1240 
1241  if (likely(skb_is_gso(skb))) {
1242  struct ixgb_buffer *buffer_info;
1243  struct iphdr *iph;
1244 
1245  if (skb_header_cloned(skb)) {
1246  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1247  if (err)
1248  return err;
1249  }
1250 
1251  hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1252  mss = skb_shinfo(skb)->gso_size;
1253  iph = ip_hdr(skb);
1254  iph->tot_len = 0;
1255  iph->check = 0;
1256  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1257  iph->daddr, 0,
1258  IPPROTO_TCP, 0);
1259  ipcss = skb_network_offset(skb);
1260  ipcso = (void *)&(iph->check) - (void *)skb->data;
1261  ipcse = skb_transport_offset(skb) - 1;
1262  tucss = skb_transport_offset(skb);
1263  tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
1264  tucse = 0;
1265 
1266  i = adapter->tx_ring.next_to_use;
1267  context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1268  buffer_info = &adapter->tx_ring.buffer_info[i];
1269  WARN_ON(buffer_info->dma != 0);
1270 
1271  context_desc->ipcss = ipcss;
1272  context_desc->ipcso = ipcso;
1273  context_desc->ipcse = cpu_to_le16(ipcse);
1274  context_desc->tucss = tucss;
1275  context_desc->tucso = tucso;
1276  context_desc->tucse = cpu_to_le16(tucse);
1277  context_desc->mss = cpu_to_le16(mss);
1278  context_desc->hdr_len = hdr_len;
1279  context_desc->status = 0;
1280  context_desc->cmd_type_len = cpu_to_le32(
1281  IXGB_CONTEXT_DESC_TYPE
1282  | IXGB_CONTEXT_DESC_CMD_TSE
1283  | IXGB_CONTEXT_DESC_CMD_IP
1284  | IXGB_CONTEXT_DESC_CMD_TCP
1285  | IXGB_CONTEXT_DESC_CMD_IDE
1286  | (skb->len - (hdr_len)));
1287 
1288 
1289  if (++i == adapter->tx_ring.count) i = 0;
1290  adapter->tx_ring.next_to_use = i;
1291 
1292  return 1;
1293  }
1294 
1295  return 0;
1296 }
1297 
1298 static bool
1299 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1300 {
1301  struct ixgb_context_desc *context_desc;
1302  unsigned int i;
1303  u8 css, cso;
1304 
1305  if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1306  struct ixgb_buffer *buffer_info;
1307  css = skb_checksum_start_offset(skb);
1308  cso = css + skb->csum_offset;
1309 
1310  i = adapter->tx_ring.next_to_use;
1311  context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1312  buffer_info = &adapter->tx_ring.buffer_info[i];
1313  WARN_ON(buffer_info->dma != 0);
1314 
1315  context_desc->tucss = css;
1316  context_desc->tucso = cso;
1317  context_desc->tucse = 0;
1318  /* zero out any previously existing data in one instruction */
1319  *(u32 *)&(context_desc->ipcss) = 0;
1320  context_desc->status = 0;
1321  context_desc->hdr_len = 0;
1322  context_desc->mss = 0;
1323  context_desc->cmd_type_len =
1324  cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1325  | IXGB_TX_DESC_CMD_RS);
1326 
1327  if (++i == adapter->tx_ring.count) i = 0;
1328  adapter->tx_ring.next_to_use = i;
1329 
1330  return true;
1331  }
1332 
1333  return false;
1334 }
1335 
1336 #define IXGB_MAX_TXD_PWR 14
1337 #define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
1338 
1339 static int
1340 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1341  unsigned int first)
1342 {
1343  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1344  struct pci_dev *pdev = adapter->pdev;
1345  struct ixgb_buffer *buffer_info;
1346  int len = skb_headlen(skb);
1347  unsigned int offset = 0, size, count = 0, i;
1348  unsigned int mss = skb_shinfo(skb)->gso_size;
1349  unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1350  unsigned int f;
1351 
1352  i = tx_ring->next_to_use;
1353 
1354  while (len) {
1355  buffer_info = &tx_ring->buffer_info[i];
1356  size = min(len, IXGB_MAX_DATA_PER_TXD);
1357  /* Workaround for premature desc write-backs
1358  * in TSO mode. Append 4-byte sentinel desc */
1359  if (unlikely(mss && !nr_frags && size == len && size > 8))
1360  size -= 4;
1361 
1362  buffer_info->length = size;
1363  WARN_ON(buffer_info->dma != 0);
1364  buffer_info->time_stamp = jiffies;
1365  buffer_info->mapped_as_page = false;
1366  buffer_info->dma = dma_map_single(&pdev->dev,
1367  skb->data + offset,
1368  size, DMA_TO_DEVICE);
1369  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1370  goto dma_error;
1371  buffer_info->next_to_watch = 0;
1372 
1373  len -= size;
1374  offset += size;
1375  count++;
1376  if (len) {
1377  i++;
1378  if (i == tx_ring->count)
1379  i = 0;
1380  }
1381  }
1382 
1383  for (f = 0; f < nr_frags; f++) {
1384  const struct skb_frag_struct *frag;
1385 
1386  frag = &skb_shinfo(skb)->frags[f];
1387  len = skb_frag_size(frag);
1388  offset = 0;
1389 
1390  while (len) {
1391  i++;
1392  if (i == tx_ring->count)
1393  i = 0;
1394 
1395  buffer_info = &tx_ring->buffer_info[i];
1396  size = min(len, IXGB_MAX_DATA_PER_TXD);
1397 
1398  /* Workaround for premature desc write-backs
1399  * in TSO mode. Append 4-byte sentinel desc */
1400  if (unlikely(mss && (f == (nr_frags - 1))
1401  && size == len && size > 8))
1402  size -= 4;
1403 
1404  buffer_info->length = size;
1405  buffer_info->time_stamp = jiffies;
1406  buffer_info->mapped_as_page = true;
1407  buffer_info->dma =
1408  skb_frag_dma_map(&pdev->dev, frag, offset, size,
1409  DMA_TO_DEVICE);
1410  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1411  goto dma_error;
1412  buffer_info->next_to_watch = 0;
1413 
1414  len -= size;
1415  offset += size;
1416  count++;
1417  }
1418  }
1419  tx_ring->buffer_info[i].skb = skb;
1420  tx_ring->buffer_info[first].next_to_watch = i;
1421 
1422  return count;
1423 
1424 dma_error:
1425  dev_err(&pdev->dev, "TX DMA map failed\n");
1426  buffer_info->dma = 0;
1427  if (count)
1428  count--;
1429 
1430  while (count--) {
1431  if (i==0)
1432  i += tx_ring->count;
1433  i--;
1434  buffer_info = &tx_ring->buffer_info[i];
1435  ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1436  }
1437 
1438  return 0;
1439 }
1440 
1441 static void
1442 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1443 {
1444  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1445  struct ixgb_tx_desc *tx_desc = NULL;
1446  struct ixgb_buffer *buffer_info;
1447  u32 cmd_type_len = adapter->tx_cmd_type;
1448  u8 status = 0;
1449  u8 popts = 0;
1450  unsigned int i;
1451 
1452  if (tx_flags & IXGB_TX_FLAGS_TSO) {
1453  cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1454  popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1455  }
1456 
1457  if (tx_flags & IXGB_TX_FLAGS_CSUM)
1458  popts |= IXGB_TX_DESC_POPTS_TXSM;
1459 
1460  if (tx_flags & IXGB_TX_FLAGS_VLAN)
1461  cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1462 
1463  i = tx_ring->next_to_use;
1464 
1465  while (count--) {
1466  buffer_info = &tx_ring->buffer_info[i];
1467  tx_desc = IXGB_TX_DESC(*tx_ring, i);
1468  tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1469  tx_desc->cmd_type_len =
1470  cpu_to_le32(cmd_type_len | buffer_info->length);
1471  tx_desc->status = status;
1472  tx_desc->popts = popts;
1473  tx_desc->vlan = cpu_to_le16(vlan_id);
1474 
1475  if (++i == tx_ring->count) i = 0;
1476  }
1477 
1478  tx_desc->cmd_type_len |=
1479  cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
1480 
1481  /* Force memory writes to complete before letting h/w
1482  * know there are new descriptors to fetch. (Only
1483  * applicable for weak-ordered memory model archs,
1484  * such as IA-64). */
1485  wmb();
1486 
1487  tx_ring->next_to_use = i;
1488  IXGB_WRITE_REG(&adapter->hw, TDT, i);
1489 }
1490 
1491 static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
1492 {
1493  struct ixgb_adapter *adapter = netdev_priv(netdev);
1494  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1495 
1496  netif_stop_queue(netdev);
1497  /* Herbert's original patch had:
1498  * smp_mb__after_netif_stop_queue();
1499  * but since that doesn't exist yet, just open code it. */
1500  smp_mb();
1501 
1502  /* We need to check again in a case another CPU has just
1503  * made room available. */
1504  if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
1505  return -EBUSY;
1506 
1507  /* A reprieve! */
1508  netif_start_queue(netdev);
1509  ++adapter->restart_queue;
1510  return 0;
1511 }
1512 
1513 static int ixgb_maybe_stop_tx(struct net_device *netdev,
1514  struct ixgb_desc_ring *tx_ring, int size)
1515 {
1516  if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
1517  return 0;
1518  return __ixgb_maybe_stop_tx(netdev, size);
1519 }
1520 
1521 
1522 /* Tx Descriptors needed, worst case */
1523 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1524  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1525 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
1526  MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
1527  + 1 /* one more needed for sentinel TSO workaround */
1528 
1529 static netdev_tx_t
1530 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1531 {
1532  struct ixgb_adapter *adapter = netdev_priv(netdev);
1533  unsigned int first;
1534  unsigned int tx_flags = 0;
1535  int vlan_id = 0;
1536  int count = 0;
1537  int tso;
1538 
1539  if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1540  dev_kfree_skb(skb);
1541  return NETDEV_TX_OK;
1542  }
1543 
1544  if (skb->len <= 0) {
1545  dev_kfree_skb(skb);
1546  return NETDEV_TX_OK;
1547  }
1548 
1549  if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
1550  DESC_NEEDED)))
1551  return NETDEV_TX_BUSY;
1552 
1553  if (vlan_tx_tag_present(skb)) {
1554  tx_flags |= IXGB_TX_FLAGS_VLAN;
1555  vlan_id = vlan_tx_tag_get(skb);
1556  }
1557 
1558  first = adapter->tx_ring.next_to_use;
1559 
1560  tso = ixgb_tso(adapter, skb);
1561  if (tso < 0) {
1562  dev_kfree_skb(skb);
1563  return NETDEV_TX_OK;
1564  }
1565 
1566  if (likely(tso))
1567  tx_flags |= IXGB_TX_FLAGS_TSO;
1568  else if (ixgb_tx_csum(adapter, skb))
1569  tx_flags |= IXGB_TX_FLAGS_CSUM;
1570 
1571  count = ixgb_tx_map(adapter, skb, first);
1572 
1573  if (count) {
1574  ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
1575  /* Make sure there is space in the ring for the next send. */
1576  ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
1577 
1578  } else {
1579  dev_kfree_skb_any(skb);
1580  adapter->tx_ring.buffer_info[first].time_stamp = 0;
1581  adapter->tx_ring.next_to_use = first;
1582  }
1583 
1584  return NETDEV_TX_OK;
1585 }
1586 
1592 static void
1593 ixgb_tx_timeout(struct net_device *netdev)
1594 {
1595  struct ixgb_adapter *adapter = netdev_priv(netdev);
1596 
1597  /* Do the reset outside of interrupt context */
1598  schedule_work(&adapter->tx_timeout_task);
1599 }
1600 
1601 static void
1602 ixgb_tx_timeout_task(struct work_struct *work)
1603 {
1604  struct ixgb_adapter *adapter =
1605  container_of(work, struct ixgb_adapter, tx_timeout_task);
1606 
1607  adapter->tx_timeout_count++;
1608  ixgb_down(adapter, true);
1609  ixgb_up(adapter);
1610 }
1611 
1620 static struct net_device_stats *
1621 ixgb_get_stats(struct net_device *netdev)
1622 {
1623  return &netdev->stats;
1624 }
1625 
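/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure.
 **/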
1634 static int
1635 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1636 {
1637  struct ixgb_adapter *adapter = netdev_priv(netdev);
1638  int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1639  int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1640 
1641  /* MTU < 68 is an error for IPv4 traffic, just don't allow it */
1642  if ((new_mtu < 68) ||
1643  (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1644  netif_err(adapter, probe, adapter->netdev,
1645  "Invalid MTU setting %d\n", new_mtu);
1646  return -EINVAL;
1647  }
1648 
1649  if (old_max_frame == max_frame)
1650  return 0;
1651 
1652  if (netif_running(netdev))
1653  ixgb_down(adapter, true);
1654 
1655  adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
1656 
1657  netdev->mtu = new_mtu;
1658 
1659  if (netif_running(netdev))
1660  ixgb_up(adapter);
1661 
1662  return 0;
1663 }
1664 
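/**
 * ixgb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/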
1670 void
1671 ixgb_update_stats(struct ixgb_adapter *adapter)
1672 {
1673  struct net_device *netdev = adapter->netdev;
1674  struct pci_dev *pdev = adapter->pdev;
1675 
1676  /* Prevent stats update while adapter is being reset */
1677  if (pci_channel_offline(pdev))
1678  return;
1679 
1680  if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1681  (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1682  u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1683  u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1684  u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1685  u64 bcast = ((u64)bcast_h << 32) | bcast_l;
1686 
1687  multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1688  /* fix up multicast stats by removing broadcasts */
1689  if (multi >= bcast)
1690  multi -= bcast;
1691 
1692  adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1693  adapter->stats.mprch += (multi >> 32);
1694  adapter->stats.bprcl += bcast_l;
1695  adapter->stats.bprch += bcast_h;
1696  } else {
1697  adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1698  adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1699  adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1700  adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1701  }
1702  adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1703  adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1704  adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1705  adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1706  adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1707  adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1708  adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1709  adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1710  adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1711  adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1712  adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1713  adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1714  adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1715  adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1716  adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1717  adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1718  adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1719  adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1720  adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1721  adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1722  adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1723  adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1724  adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1725  adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1726  adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1727  adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1728  adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1729  adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1730  adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1731  adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1732  adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1733  adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1734  adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1735  adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1736  adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1737  adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1738  adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1739  adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1740  adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1741  adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1742  adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1743  adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1744  adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1745  adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1746  adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1747  adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1748  adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1749  adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1750  adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1751  adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1752  adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1753  adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1754  adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1755  adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1756  adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1757  adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1758 
1759  /* Fill out the OS statistics structure */
1760 
1761  netdev->stats.rx_packets = adapter->stats.gprcl;
1762  netdev->stats.tx_packets = adapter->stats.gptcl;
1763  netdev->stats.rx_bytes = adapter->stats.gorcl;
1764  netdev->stats.tx_bytes = adapter->stats.gotcl;
1765  netdev->stats.multicast = adapter->stats.mprcl;
1766  netdev->stats.collisions = 0;
1767 
1768  /* ignore RLEC as it reports errors for padded (<64bytes) frames
1769  * with a length in the type/len field */
1770  netdev->stats.rx_errors =
1771  /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1772  adapter->stats.ruc +
1773  adapter->stats.roc /*+ adapter->stats.rlec */ +
1774  adapter->stats.icbc +
1775  adapter->stats.ecbc + adapter->stats.mpc;
1776 
1777  /* see above
1778  * netdev->stats.rx_length_errors = adapter->stats.rlec;
1779  */
1780 
1781  netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1782  netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1783  netdev->stats.rx_missed_errors = adapter->stats.mpc;
1784  netdev->stats.rx_over_errors = adapter->stats.mpc;
1785 
1786  netdev->stats.tx_errors = 0;
1787  netdev->stats.rx_frame_errors = 0;
1788  netdev->stats.tx_aborted_errors = 0;
1789  netdev->stats.tx_carrier_errors = 0;
1790  netdev->stats.tx_fifo_errors = 0;
1791  netdev->stats.tx_heartbeat_errors = 0;
1792  netdev->stats.tx_window_errors = 0;
1793 }
1794 
1795 #define IXGB_MAX_INTR 10
1796 
1802 static irqreturn_t
1803 ixgb_intr(int irq, void *data)
1804 {
1805  struct net_device *netdev = data;
1806  struct ixgb_adapter *adapter = netdev_priv(netdev);
1807  struct ixgb_hw *hw = &adapter->hw;
1808  u32 icr = IXGB_READ_REG(hw, ICR);
1809 
1810  if (unlikely(!icr))
1811  return IRQ_NONE; /* Not our interrupt */
1812 
1813  if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1814  if (!test_bit(__IXGB_DOWN, &adapter->flags))
1815  mod_timer(&adapter->watchdog_timer, jiffies);
1816 
1817  if (napi_schedule_prep(&adapter->napi)) {
1818 
1819  /* Disable interrupts and register for poll. The flush
1820  of the posted write is intentionally left out.
1821  */
1822 
1823  IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1824  __napi_schedule(&adapter->napi);
1825  }
1826  return IRQ_HANDLED;
1827 }
1828 
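/**
 * ixgb_clean - NAPI Rx polling callback
 * @napi: napi struct embedded in the board private structure
 * @budget: maximum number of receive packets to process
 **/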
1834 static int
1835 ixgb_clean(struct napi_struct *napi, int budget)
1836 {
1837  struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1838  int work_done = 0;
1839 
1840  ixgb_clean_tx_irq(adapter);
1841  ixgb_clean_rx_irq(adapter, &work_done, budget);
1842 
1843  /* If budget not fully consumed, exit the polling mode */
1844  if (work_done < budget) {
1845  napi_complete(napi);
1846  if (!test_bit(__IXGB_DOWN, &adapter->flags))
1847  ixgb_irq_enable(adapter);
1848  }
1849 
1850  return work_done;
1851 }
1852 
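/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/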
1858 static bool
1859 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1860 {
1861  struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1862  struct net_device *netdev = adapter->netdev;
1863  struct ixgb_tx_desc *tx_desc, *eop_desc;
1864  struct ixgb_buffer *buffer_info;
1865  unsigned int i, eop;
1866  bool cleaned = false;
1867 
1868  i = tx_ring->next_to_clean;
1869  eop = tx_ring->buffer_info[i].next_to_watch;
1870  eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1871 
1872  while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1873 
1874  rmb(); /* read buffer_info after eop_desc */
1875  for (cleaned = false; !cleaned; ) {
1876  tx_desc = IXGB_TX_DESC(*tx_ring, i);
1877  buffer_info = &tx_ring->buffer_info[i];
1878 
1879  if (tx_desc->popts &
1880  (IXGB_TX_DESC_POPTS_TXSM |
1881  IXGB_TX_DESC_POPTS_IXSM))
1882  adapter->hw_csum_tx_good++;
1883 
1884  ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1885 
1886  *(u32 *)&(tx_desc->status) = 0;
1887 
1888  cleaned = (i == eop);
1889  if (++i == tx_ring->count) i = 0;
1890  }
1891 
1892  eop = tx_ring->buffer_info[i].next_to_watch;
1893  eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1894  }
1895 
1896  tx_ring->next_to_clean = i;
1897 
1898  if (unlikely(cleaned && netif_carrier_ok(netdev) &&
1899  IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
1900  /* Make sure that anybody stopping the queue after this
1901  * sees the new next_to_clean. */
1902  smp_mb();
1903 
1904  if (netif_queue_stopped(netdev) &&
1905  !(test_bit(__IXGB_DOWN, &adapter->flags))) {
1906  netif_wake_queue(netdev);
1907  ++adapter->restart_queue;
1908  }
1909  }
1910 
1911  if (adapter->detect_tx_hung) {
1912  /* detect a transmit hang in hardware, this serializes the
1913  * check with the clearing of time_stamp and movement of i */
1914  adapter->detect_tx_hung = false;
1915  if (tx_ring->buffer_info[eop].time_stamp &&
1916  time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1917  && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1918  IXGB_STATUS_TXOFF)) {
1919  /* detected Tx unit hang */
1920  netif_err(adapter, drv, adapter->netdev,
1921  "Detected Tx Unit Hang\n"
1922  " TDH <%x>\n"
1923  " TDT <%x>\n"
1924  " next_to_use <%x>\n"
1925  " next_to_clean <%x>\n"
1926  "buffer_info[next_to_clean]\n"
1927  " time_stamp <%lx>\n"
1928  " next_to_watch <%x>\n"
1929  " jiffies <%lx>\n"
1930  " next_to_watch.status <%x>\n",
1931  IXGB_READ_REG(&adapter->hw, TDH),
1932  IXGB_READ_REG(&adapter->hw, TDT),
1933  tx_ring->next_to_use,
1934  tx_ring->next_to_clean,
1935  tx_ring->buffer_info[eop].time_stamp,
1936  eop,
1937  jiffies,
1938  eop_desc->status);
1939  netif_stop_queue(netdev);
1940  }
1941  }
1942 
1943  return cleaned;
1944 }
1945 
1953 static void
1954 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1955  struct ixgb_rx_desc *rx_desc,
1956  struct sk_buff *skb)
1957 {
1958  /* Ignore Checksum bit is set OR
1959  * TCP Checksum has not been calculated
1960  */
1961  if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1962  (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1963  skb_checksum_none_assert(skb);
1964  return;
1965  }
1966 
1967  /* At this point we know the hardware did the TCP checksum */
1968  /* now look at the TCP checksum error bit */
1969  if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1970  /* let the stack verify checksum errors */
1971  skb_checksum_none_assert(skb);
1972  adapter->hw_csum_rx_error++;
1973  } else {
1974  /* TCP checksum is good */
1975  skb->ip_summed = CHECKSUM_UNNECESSARY;
1976  adapter->hw_csum_rx_good++;
1977  }
1978 }
1979 
1980 /*
1981  * this should improve performance for small packets with large amounts
1982  * of reassembly being done in the stack
1983  */
1984 static void ixgb_check_copybreak(struct net_device *netdev,
1985  struct ixgb_buffer *buffer_info,
1986  u32 length, struct sk_buff **skb)
1987 {
1988  struct sk_buff *new_skb;
1989 
1990  if (length > copybreak)
1991  return;
1992 
1993  new_skb = netdev_alloc_skb_ip_align(netdev, length);
1994  if (!new_skb)
1995  return;
1996 
1997  skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1998  (*skb)->data - NET_IP_ALIGN,
1999  length + NET_IP_ALIGN);
2000  /* save the skb in buffer_info as good */
2001  buffer_info->skb = *skb;
2002  *skb = new_skb;
2003 }
2004 
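/**
 * ixgb_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 * @work_done: output parameter, packets processed so far
 * @work_to_do: NAPI budget for this poll
 **/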
2010 static bool
2011 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
2012 {
2013  struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2014  struct net_device *netdev = adapter->netdev;
2015  struct pci_dev *pdev = adapter->pdev;
2016  struct ixgb_rx_desc *rx_desc, *next_rxd;
2017  struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
2018  u32 length;
2019  unsigned int i, j;
2020  int cleaned_count = 0;
2021  bool cleaned = false;
2022 
2023  i = rx_ring->next_to_clean;
2024  rx_desc = IXGB_RX_DESC(*rx_ring, i);
2025  buffer_info = &rx_ring->buffer_info[i];
2026 
2027  while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
2028  struct sk_buff *skb;
2029  u8 status;
2030 
2031  if (*work_done >= work_to_do)
2032  break;
2033 
2034  (*work_done)++;
2035  rmb(); /* read descriptor and rx_buffer_info after status DD */
2036  status = rx_desc->status;
2037  skb = buffer_info->skb;
2038  buffer_info->skb = NULL;
2039 
2040  prefetch(skb->data - NET_IP_ALIGN);
2041 
2042  if (++i == rx_ring->count)
2043  i = 0;
2044  next_rxd = IXGB_RX_DESC(*rx_ring, i);
2045  prefetch(next_rxd);
2046 
2047  j = i + 1;
2048  if (j == rx_ring->count)
2049  j = 0;
2050  next2_buffer = &rx_ring->buffer_info[j];
2051  prefetch(next2_buffer);
2052 
2053  next_buffer = &rx_ring->buffer_info[i];
2054 
2055  cleaned = true;
2056  cleaned_count++;
2057 
2058  dma_unmap_single(&pdev->dev,
2059  buffer_info->dma,
2060  buffer_info->length,
2061  DMA_FROM_DEVICE);
2062  buffer_info->dma = 0;
2063 
2064  length = le16_to_cpu(rx_desc->length);
2065  rx_desc->length = 0;
2066 
2067  if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
2068 
2069  /* All receives must fit into a single buffer */
2070 
2071  pr_debug("Receive packet consumed multiple buffers length<%x>\n",
2072  length);
2073 
2074  dev_kfree_skb_irq(skb);
2075  goto rxdesc_done;
2076  }
2077 
2078  if (unlikely(rx_desc->errors &
2079  (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
2080  IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
2081  dev_kfree_skb_irq(skb);
2082  goto rxdesc_done;
2083  }
2084 
2085  ixgb_check_copybreak(netdev, buffer_info, length, &skb);
2086 
2087  /* Good Receive */
2088  skb_put(skb, length);
2089 
2090  /* Receive Checksum Offload */
2091  ixgb_rx_checksum(adapter, rx_desc, skb);
2092 
2093  skb->protocol = eth_type_trans(skb, netdev);
2094  if (status & IXGB_RX_DESC_STATUS_VP)
2095  __vlan_hwaccel_put_tag(skb,
2096  le16_to_cpu(rx_desc->special));
2097 
2098  netif_receive_skb(skb);
2099 
2100 rxdesc_done:
2101  /* clean up descriptor, might be written over by hw */
2102  rx_desc->status = 0;
2103 
2104  /* return some buffers to hardware, one at a time is too slow */
2105  if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
2106  ixgb_alloc_rx_buffers(adapter, cleaned_count);
2107  cleaned_count = 0;
2108  }
2109 
2110  /* use prefetched values */
2111  rx_desc = next_rxd;
2112  buffer_info = next_buffer;
2113  }
2114 
2115  rx_ring->next_to_clean = i;
2116 
2117  cleaned_count = IXGB_DESC_UNUSED(rx_ring);
2118  if (cleaned_count)
2119  ixgb_alloc_rx_buffers(adapter, cleaned_count);
2120 
2121  return cleaned;
2122 }
2123 
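/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: how many buffers to try to allocate
 **/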
2129 static void
2130 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2131 {
2132  struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2133  struct net_device *netdev = adapter->netdev;
2134  struct pci_dev *pdev = adapter->pdev;
2135  struct ixgb_rx_desc *rx_desc;
2136  struct ixgb_buffer *buffer_info;
2137  struct sk_buff *skb;
2138  unsigned int i;
2139  long cleancount;
2140 
2141  i = rx_ring->next_to_use;
2142  buffer_info = &rx_ring->buffer_info[i];
2143  cleancount = IXGB_DESC_UNUSED(rx_ring);
2144 
2145 
2146  /* leave three descriptors unused */
2147  while (--cleancount > 2 && cleaned_count--) {
2148  /* recycle! its good for you */
2149  skb = buffer_info->skb;
2150  if (skb) {
2151  skb_trim(skb, 0);
2152  goto map_skb;
2153  }
2154 
2155  skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2156  if (unlikely(!skb)) {
2157  /* Better luck next round */
2158  adapter->alloc_rx_buff_failed++;
2159  break;
2160  }
2161 
2162  buffer_info->skb = skb;
2163  buffer_info->length = adapter->rx_buffer_len;
2164 map_skb:
2165  buffer_info->dma = dma_map_single(&pdev->dev,
2166  skb->data,
2167  adapter->rx_buffer_len,
2168  DMA_FROM_DEVICE);
2169 
2170  rx_desc = IXGB_RX_DESC(*rx_ring, i);
2171  rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2172  /* guarantee DD bit not set now before h/w gets descriptor
2173  * this is the rest of the workaround for h/w double
2174  * writeback. */
2175  rx_desc->status = 0;
2176 
2177 
2178  if (++i == rx_ring->count) i = 0;
2179  buffer_info = &rx_ring->buffer_info[i];
2180  }
2181 
2182  if (likely(rx_ring->next_to_use != i)) {
2183  rx_ring->next_to_use = i;
2184  if (unlikely(i-- == 0))
2185  i = (rx_ring->count - 1);
2186 
2187  /* Force memory writes to complete before letting h/w
2188  * know there are new descriptors to fetch. (Only
2189  * applicable for weak-ordered memory model archs, such
2190  * as IA-64). */
2191  wmb();
2192  IXGB_WRITE_REG(&adapter->hw, RDT, i);
2193  }
2194 }
2195 
2196 static void
2197 ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2198 {
2199  u32 ctrl;
2200 
2201  /* enable VLAN tag insert/strip */
2202  ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2203  ctrl |= IXGB_CTRL0_VME;
2204  IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2205 }
2206 
2207 static void
2208 ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2209 {
2210  u32 ctrl;
2211 
2212  /* disable VLAN tag insert/strip */
2213  ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2214  ctrl &= ~IXGB_CTRL0_VME;
2215  IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2216 }
2217 
2218 static int
2219 ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2220 {
2221  struct ixgb_adapter *adapter = netdev_priv(netdev);
2222  u32 vfta, index;
2223 
2224  /* add VID to filter table */
2225 
2226  index = (vid >> 5) & 0x7F;
2227  vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2228  vfta |= (1 << (vid & 0x1F));
2229  ixgb_write_vfta(&adapter->hw, index, vfta);
2230  set_bit(vid, adapter->active_vlans);
2231 
2232  return 0;
2233 }
2234 
2235 static int
2236 ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2237 {
2238  struct ixgb_adapter *adapter = netdev_priv(netdev);
2239  u32 vfta, index;
2240 
2241  /* remove VID from filter table */
2242 
2243  index = (vid >> 5) & 0x7F;
2244  vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2245  vfta &= ~(1 << (vid & 0x1F));
2246  ixgb_write_vfta(&adapter->hw, index, vfta);
2247  clear_bit(vid, adapter->active_vlans);
2248 
2249  return 0;
2250 }
2251 
2252 static void
2253 ixgb_restore_vlan(struct ixgb_adapter *adapter)
2254 {
2255  u16 vid;
2256 
2257  for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2258  ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2259 }
2260 
2261 #ifdef CONFIG_NET_POLL_CONTROLLER
2262 /*
2263  * Polling 'interrupt' - used by things like netconsole to send skbs
2264  * without having to re-enable interrupts. It's not called while
2265  * the interrupt routine is executing.
2266  */
2267 
2268 static void ixgb_netpoll(struct net_device *dev)
2269 {
2270  struct ixgb_adapter *adapter = netdev_priv(dev);
2271 
2272  disable_irq(adapter->pdev->irq);
2273  ixgb_intr(adapter->pdev->irq, dev);
2274  enable_irq(adapter->pdev->irq);
2275 }
2276 #endif
2277 
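/**
 * ixgb_io_error_detected - called when a PCI error is detected
 * @pdev: pointer to pci device with error
 * @state: pci channel state after error
 *
 * Detaches the netdev, brings the interface down, and requests
 * a slot reset from the PCI error recovery machinery.
 **/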
2286 static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
2287  enum pci_channel_state state)
2288 {
2289  struct net_device *netdev = pci_get_drvdata(pdev);
2290  struct ixgb_adapter *adapter = netdev_priv(netdev);
2291 
2292  netif_device_detach(netdev);
2293 
2294  if (state == pci_channel_io_perm_failure)
2295  return PCI_ERS_RESULT_DISCONNECT;
2296 
2297  if (netif_running(netdev))
2298  ixgb_down(adapter, true);
2299 
2300  pci_disable_device(pdev);
2301 
2302  /* Request a slot reset. */
2303  return PCI_ERS_RESULT_NEED_RESET;
2304 }
2305 
2315 static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
2316 {
2317  struct net_device *netdev = pci_get_drvdata(pdev);
2318  struct ixgb_adapter *adapter = netdev_priv(netdev);
2319 
2320  if (pci_enable_device(pdev)) {
2321  netif_err(adapter, probe, adapter->netdev,
2322  "Cannot re-enable PCI device after reset\n");
2323  return PCI_ERS_RESULT_DISCONNECT;
2324  }
2325 
2326  /* Perform card reset only on one instance of the card */
2327  if (0 != PCI_FUNC (pdev->devfn))
2328  return PCI_ERS_RESULT_RECOVERED;
2329 
2330  pci_set_master(pdev);
2331 
2332  netif_carrier_off(netdev);
2333  netif_stop_queue(netdev);
2334  ixgb_reset(adapter);
2335 
2336  /* Make sure the EEPROM is good */
2337  if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2338  netif_err(adapter, probe, adapter->netdev,
2339  "After reset, the EEPROM checksum is not valid\n");
2340  return PCI_ERS_RESULT_DISCONNECT;
2341  }
2342  ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2343  memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2344 
2345  if (!is_valid_ether_addr(netdev->perm_addr)) {
2346  netif_err(adapter, probe, adapter->netdev,
2347  "After reset, invalid MAC address\n");
2348  return PCI_ERS_RESULT_DISCONNECT;
2349  }
2350 
2351  return PCI_ERS_RESULT_RECOVERED;
2352 }
2353 
2362 static void ixgb_io_resume(struct pci_dev *pdev)
2363 {
2364  struct net_device *netdev = pci_get_drvdata(pdev);
2365  struct ixgb_adapter *adapter = netdev_priv(netdev);
2366 
2367  pci_set_master(pdev);
2368 
2369  if (netif_running(netdev)) {
2370  if (ixgb_up(adapter)) {
2371  pr_err("can't bring device back up after reset\n");
2372  return;
2373  }
2374  }
2375 
2376  netif_device_attach(netdev);
2377  mod_timer(&adapter->watchdog_timer, jiffies);
2378 }
2379 
2380 /* ixgb_main.c */