Linux Kernel 3.7.1
e1000_main.c
1 /*******************************************************************************
2 
3  Intel PRO/1000 Linux driver
4  Copyright(c) 1999 - 2006 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  Linux NICS <[email protected]>
24  e1000-devel Mailing List <[email protected]>
25  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35 
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41 
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
87  /* required last entry */
88  {0,}
89 };
90 
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92 
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102  struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104  struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106  struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108  struct e1000_rx_ring *rx_ring);
110 
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void __devexit e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125  struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127  struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133  struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139  struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142  struct e1000_rx_ring *rx_ring,
143  int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145  struct e1000_rx_ring *rx_ring,
146  int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148  struct e1000_rx_ring *rx_ring,
149  int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151  struct e1000_rx_ring *rx_ring,
152  int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155  int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162  struct sk_buff *skb);
163 
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166  netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168  bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
171 static void e1000_restore_vlan(struct e1000_adapter *adapter);
172 
173 #ifdef CONFIG_PM
174 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
175 static int e1000_resume(struct pci_dev *pdev);
176 #endif
177 static void e1000_shutdown(struct pci_dev *pdev);
178 
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll (struct net_device *netdev);
182 #endif
183 
184 #define COPYBREAK_DEFAULT 256
185 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186 module_param(copybreak, uint, 0644);
188  "Maximum size of packet that is copied to a new buffer on receive");
189 
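/* Editor's illustration (hedged, not part of the upstream source): copybreak
 * trades a small memcpy for better memory use.  With the default of 256, a
 * 60-byte TCP ACK received into a 2 KiB ring buffer is copied into a freshly
 * allocated small skb and the original buffer is recycled in place, while a
 * 1500-byte frame is handed up in its original buffer.  The cutoff can be set
 * at load time, e.g. "modprobe e1000 copybreak=128", or at runtime through
 * /sys/module/e1000/parameters/copybreak (mode 0644 above makes it writable).
 */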
190 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191  pci_channel_state_t state);
192 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193 static void e1000_io_resume(struct pci_dev *pdev);
194 
195 static const struct pci_error_handlers e1000_err_handler = {
196  .error_detected = e1000_io_error_detected,
197  .slot_reset = e1000_io_slot_reset,
198  .resume = e1000_io_resume,
199 };
200 
201 static struct pci_driver e1000_driver = {
202  .name = e1000_driver_name,
203  .id_table = e1000_pci_tbl,
204  .probe = e1000_probe,
205  .remove = __devexit_p(e1000_remove),
206 #ifdef CONFIG_PM
207  /* Power Management Hooks */
208  .suspend = e1000_suspend,
209  .resume = e1000_resume,
210 #endif
211  .shutdown = e1000_shutdown,
212  .err_handler = &e1000_err_handler
213 };
214 
215 MODULE_AUTHOR("Intel Corporation, <[email protected]>");
216 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_VERSION);
219 
220 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221 static int debug = -1;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
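/* Editor's note (illustrative): netif_msg_init() treats a negative value as
 * "use the driver default", so debug=-1 yields DEFAULT_MSG_ENABLE
 * (DRV | PROBE | LINK).  A non-negative value n enables the first n message
 * classes, e.g. "modprobe e1000 debug=4" gives a mask of 0xF
 * (DRV | PROBE | LINK | TIMER), the usual netif_msg convention.
 */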
224 
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231 {
232  struct e1000_adapter *adapter = hw->back;
233  return adapter->netdev;
234 }
235 
243 static int __init e1000_init_module(void)
244 {
245  int ret;
246  pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
247 
248  pr_info("%s\n", e1000_copyright);
249 
250  ret = pci_register_driver(&e1000_driver);
251  if (copybreak != COPYBREAK_DEFAULT) {
252  if (copybreak == 0)
253  pr_info("copybreak disabled\n");
254  else
255  pr_info("copybreak enabled for "
256  "packets <= %u bytes\n", copybreak);
257  }
258  return ret;
259 }
260 
261 module_init(e1000_init_module);
262 
270 static void __exit e1000_exit_module(void)
271 {
272  pci_unregister_driver(&e1000_driver);
273 }
274 
275 module_exit(e1000_exit_module);
276 
277 static int e1000_request_irq(struct e1000_adapter *adapter)
278 {
279  struct net_device *netdev = adapter->netdev;
280  irq_handler_t handler = e1000_intr;
281  int irq_flags = IRQF_SHARED;
282  int err;
283 
284  err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285  netdev);
286  if (err) {
287  e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
288  }
289 
290  return err;
291 }
292 
293 static void e1000_free_irq(struct e1000_adapter *adapter)
294 {
295  struct net_device *netdev = adapter->netdev;
296 
297  free_irq(adapter->pdev->irq, netdev);
298 }
299 
305 static void e1000_irq_disable(struct e1000_adapter *adapter)
306 {
307  struct e1000_hw *hw = &adapter->hw;
308 
309  ew32(IMC, ~0);
310  E1000_WRITE_FLUSH();
311  synchronize_irq(adapter->pdev->irq);
312 }
313 
319 static void e1000_irq_enable(struct e1000_adapter *adapter)
320 {
321  struct e1000_hw *hw = &adapter->hw;
322 
323  ew32(IMS, IMS_ENABLE_MASK);
324  E1000_WRITE_FLUSH();
325 }
326 
327 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
328 {
329  struct e1000_hw *hw = &adapter->hw;
330  struct net_device *netdev = adapter->netdev;
331  u16 vid = hw->mng_cookie.vlan_id;
332  u16 old_vid = adapter->mng_vlan_id;
333 
334  if (!e1000_vlan_used(adapter))
335  return;
336 
337  if (!test_bit(vid, adapter->active_vlans)) {
338  if (hw->mng_cookie.status &
339  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
340  e1000_vlan_rx_add_vid(netdev, vid);
341  adapter->mng_vlan_id = vid;
342  } else {
343  adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
344  }
345  if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
346  (vid != old_vid) &&
347  !test_bit(old_vid, adapter->active_vlans))
348  e1000_vlan_rx_kill_vid(netdev, old_vid);
349  } else {
350  adapter->mng_vlan_id = vid;
351  }
352 }
353 
354 static void e1000_init_manageability(struct e1000_adapter *adapter)
355 {
356  struct e1000_hw *hw = &adapter->hw;
357 
358  if (adapter->en_mng_pt) {
359  u32 manc = er32(MANC);
360 
361  /* disable hardware interception of ARP */
362  manc &= ~(E1000_MANC_ARP_EN);
363 
364  ew32(MANC, manc);
365  }
366 }
367 
368 static void e1000_release_manageability(struct e1000_adapter *adapter)
369 {
370  struct e1000_hw *hw = &adapter->hw;
371 
372  if (adapter->en_mng_pt) {
373  u32 manc = er32(MANC);
374 
375  /* re-enable hardware interception of ARP */
376  manc |= E1000_MANC_ARP_EN;
377 
378  ew32(MANC, manc);
379  }
380 }
381 
386 static void e1000_configure(struct e1000_adapter *adapter)
387 {
388  struct net_device *netdev = adapter->netdev;
389  int i;
390 
391  e1000_set_rx_mode(netdev);
392 
393  e1000_restore_vlan(adapter);
394  e1000_init_manageability(adapter);
395 
396  e1000_configure_tx(adapter);
397  e1000_setup_rctl(adapter);
398  e1000_configure_rx(adapter);
399  /* call E1000_DESC_UNUSED which always leaves
400  * at least 1 descriptor unused to make sure
401  * next_to_use != next_to_clean */
402  for (i = 0; i < adapter->num_rx_queues; i++) {
403  struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404  adapter->alloc_rx_buf(adapter, ring,
405  E1000_DESC_UNUSED(ring));
406  }
407 }
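/* Editor's note (illustrative sketch, not upstream code): keeping one
 * descriptor permanently unused lets the ring distinguish "full" from
 * "empty" without a separate element count:
 *
 *     empty: next_to_clean == next_to_use
 *     full:  (next_to_use + 1) % count == next_to_clean
 *
 * which is why E1000_DESC_UNUSED() above never hands the allocator the
 * final free slot.
 */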
408 
409 int e1000_up(struct e1000_adapter *adapter)
410 {
411  struct e1000_hw *hw = &adapter->hw;
412 
413  /* hardware has been reset, we need to reload some things */
414  e1000_configure(adapter);
415 
416  clear_bit(__E1000_DOWN, &adapter->flags);
417 
418  napi_enable(&adapter->napi);
419 
420  e1000_irq_enable(adapter);
421 
422  netif_wake_queue(adapter->netdev);
423 
424  /* fire a link change interrupt to start the watchdog */
425  ew32(ICS, E1000_ICS_LSC);
426  return 0;
427 }
428 
439 void e1000_power_up_phy(struct e1000_adapter *adapter)
440 {
441  struct e1000_hw *hw = &adapter->hw;
442  u16 mii_reg = 0;
443 
444  /* Just clear the power down bit to wake the phy back up */
445  if (hw->media_type == e1000_media_type_copper) {
446  /* according to the manual, the phy will retain its
447  * settings across a power-down/up cycle */
448  e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
449  mii_reg &= ~MII_CR_POWER_DOWN;
450  e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
451  }
452 }
453 
454 static void e1000_power_down_phy(struct e1000_adapter *adapter)
455 {
456  struct e1000_hw *hw = &adapter->hw;
457 
458  /* Power down the PHY so no link is implied when interface is down *
459  * The PHY cannot be powered down if any of the following is true *
460  * (a) WoL is enabled
461  * (b) AMT is active
462  * (c) SoL/IDER session is active */
463  if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464  hw->media_type == e1000_media_type_copper) {
465  u16 mii_reg = 0;
466 
467  switch (hw->mac_type) {
468  case e1000_82540:
469  case e1000_82545:
470  case e1000_82545_rev_3:
471  case e1000_82546:
472  case e1000_ce4100:
473  case e1000_82546_rev_3:
474  case e1000_82541:
475  case e1000_82541_rev_2:
476  case e1000_82547:
477  case e1000_82547_rev_2:
478  if (er32(MANC) & E1000_MANC_SMBUS_EN)
479  goto out;
480  break;
481  default:
482  goto out;
483  }
484  e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485  mii_reg |= MII_CR_POWER_DOWN;
486  e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487  msleep(1);
488  }
489 out:
490  return;
491 }
492 
493 static void e1000_down_and_stop(struct e1000_adapter *adapter)
494 {
495  set_bit(__E1000_DOWN, &adapter->flags);
496 
497  /* Only kill reset task if adapter is not resetting */
498  if (!test_bit(__E1000_RESETTING, &adapter->flags))
499  cancel_work_sync(&adapter->reset_task);
500 
504 }
505 
506 void e1000_down(struct e1000_adapter *adapter)
507 {
508  struct e1000_hw *hw = &adapter->hw;
509  struct net_device *netdev = adapter->netdev;
510  u32 rctl, tctl;
511 
512 
513  /* disable receives in the hardware */
514  rctl = er32(RCTL);
515  ew32(RCTL, rctl & ~E1000_RCTL_EN);
516  /* flush and sleep below */
517 
518  netif_tx_disable(netdev);
519 
520  /* disable transmits in the hardware */
521  tctl = er32(TCTL);
522  tctl &= ~E1000_TCTL_EN;
523  ew32(TCTL, tctl);
524  /* flush both disables and wait for them to finish */
525  E1000_WRITE_FLUSH();
526  msleep(10);
527 
528  napi_disable(&adapter->napi);
529 
530  e1000_irq_disable(adapter);
531 
532  /*
533  * Setting DOWN must be after irq_disable to prevent
534  * a screaming interrupt. Setting DOWN also prevents
535  * tasks from rescheduling.
536  */
537  e1000_down_and_stop(adapter);
538 
539  adapter->link_speed = 0;
540  adapter->link_duplex = 0;
541  netif_carrier_off(netdev);
542 
543  e1000_reset(adapter);
544  e1000_clean_all_tx_rings(adapter);
545  e1000_clean_all_rx_rings(adapter);
546 }
547 
548 static void e1000_reinit_safe(struct e1000_adapter *adapter)
549 {
550  while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
551  msleep(1);
552  mutex_lock(&adapter->mutex);
553  e1000_down(adapter);
554  e1000_up(adapter);
555  mutex_unlock(&adapter->mutex);
556  clear_bit(__E1000_RESETTING, &adapter->flags);
557 }
558 
559 void e1000_reinit_locked(struct e1000_adapter *adapter)
560 {
561  /* if rtnl_lock is not held the call path is bogus */
562  ASSERT_RTNL();
563  WARN_ON(in_interrupt());
564  while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
565  msleep(1);
566  e1000_down(adapter);
567  e1000_up(adapter);
568  clear_bit(__E1000_RESETTING, &adapter->flags);
569 }
570 
571 void e1000_reset(struct e1000_adapter *adapter)
572 {
573  struct e1000_hw *hw = &adapter->hw;
574  u32 pba = 0, tx_space, min_tx_space, min_rx_space;
575  bool legacy_pba_adjust = false;
576  u16 hwm;
577 
578  /* Repartition Pba for greater than 9k mtu
579  * To take effect CTRL.RST is required.
580  */
581 
582  switch (hw->mac_type) {
583  case e1000_82542_rev2_0:
584  case e1000_82542_rev2_1:
585  case e1000_82543:
586  case e1000_82544:
587  case e1000_82540:
588  case e1000_82541:
589  case e1000_82541_rev_2:
590  legacy_pba_adjust = true;
591  pba = E1000_PBA_48K;
592  break;
593  case e1000_82545:
594  case e1000_82545_rev_3:
595  case e1000_82546:
596  case e1000_ce4100:
597  case e1000_82546_rev_3:
598  pba = E1000_PBA_48K;
599  break;
600  case e1000_82547:
601  case e1000_82547_rev_2:
602  legacy_pba_adjust = true;
603  pba = E1000_PBA_30K;
604  break;
605  case e1000_undefined:
606  case e1000_num_macs:
607  break;
608  }
609 
610  if (legacy_pba_adjust) {
611  if (hw->max_frame_size > E1000_RXBUFFER_8192)
612  pba -= 8; /* allocate more FIFO for Tx */
613 
614  if (hw->mac_type == e1000_82547) {
615  adapter->tx_fifo_head = 0;
616  adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
617  adapter->tx_fifo_size =
618  (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
619  atomic_set(&adapter->tx_fifo_stall, 0);
620  }
621  } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
622  /* adjust PBA for jumbo frames */
623  ew32(PBA, pba);
624 
625  /* To maintain wire speed transmits, the Tx FIFO should be
626  * large enough to accommodate two full transmit packets,
627  * rounded up to the next 1KB and expressed in KB. Likewise,
628  * the Rx FIFO should be large enough to accommodate at least
629  * one full receive packet and is similarly rounded up and
630  * expressed in KB. */
631  pba = er32(PBA);
632  /* upper 16 bits has Tx packet buffer allocation size in KB */
633  tx_space = pba >> 16;
634  /* lower 16 bits has Rx packet buffer allocation size in KB */
635  pba &= 0xffff;
636  /*
637  * the tx fifo also stores 16 bytes of information about the tx
638  * but don't include ethernet FCS because hardware appends it
639  */
640  min_tx_space = (hw->max_frame_size +
641  sizeof(struct e1000_tx_desc) -
642  ETH_FCS_LEN) * 2;
643  min_tx_space = ALIGN(min_tx_space, 1024);
644  min_tx_space >>= 10;
645  /* software strips receive CRC, so leave room for it */
646  min_rx_space = hw->max_frame_size;
647  min_rx_space = ALIGN(min_rx_space, 1024);
648  min_rx_space >>= 10;
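/* Editor's worked example (hedged; assumes a 9000-byte jumbo MTU and the
 * 16-byte e1000_tx_desc): max_frame_size = 9000 + 14 + 4 = 9018, so
 * min_tx_space = (9018 + 16 - 4) * 2 = 18060 -> ALIGN(1024) = 18432
 * -> 18 KB, and min_rx_space = 9018 -> ALIGN(1024) = 9216 -> 9 KB.
 */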
649 
650  /* If current Tx allocation is less than the min Tx FIFO size,
651  * and the min Tx FIFO size is less than the current Rx FIFO
652  * allocation, take space away from current Rx allocation */
653  if (tx_space < min_tx_space &&
654  ((min_tx_space - tx_space) < pba)) {
655  pba = pba - (min_tx_space - tx_space);
656 
657  /* PCI/PCIx hardware has PBA alignment constraints */
658  switch (hw->mac_type) {
659  case e1000_82545 ... e1000_82546_rev_3:
660  pba &= ~(E1000_PBA_8K - 1);
661  break;
662  default:
663  break;
664  }
665 
666  /* if short on rx space, rx wins and must trump tx
667  * adjustment or use Early Receive if available */
668  if (pba < min_rx_space)
669  pba = min_rx_space;
670  }
671  }
672 
673  ew32(PBA, pba);
674 
675  /*
676  * flow control settings:
677  * The high water mark must be low enough to fit one full frame
678  * (or the size used for early receive) above it in the Rx FIFO.
679  * Set it to the lower of:
680  * - 90% of the Rx FIFO size, and
681  * - the full Rx FIFO size minus the early receive size (for parts
682  * with ERT support assuming ERT set to E1000_ERT_2048), or
683  * - the full Rx FIFO size minus one full frame
684  */
685  hwm = min(((pba << 10) * 9 / 10),
686  ((pba << 10) - hw->max_frame_size));
687 
688  hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
689  hw->fc_low_water = hw->fc_high_water - 8;
690  hw->fc_pause_time = E1000_FC_PAUSE_TIME;
691  hw->fc_send_xon = 1;
692  hw->fc = hw->original_fc;
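/* Editor's worked example (hedged; assumes E1000_PBA_48K == 48 KB and a
 * 1522-byte max frame): pba << 10 = 49152 bytes; 90% of that is 44236 and
 * 49152 - 1522 = 47630, so hwm = min(44236, 47630) = 44236.  After the
 * 8-byte alignment, fc_high_water = 44232 (0xACC8) and fc_low_water = 44224,
 * leaving roughly 4.8 KB (more than one full frame) of Rx FIFO headroom
 * above the XOFF threshold.
 */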
693 
694  /* Allow time for pending master requests to run */
695  e1000_reset_hw(hw);
696  if (hw->mac_type >= e1000_82544)
697  ew32(WUC, 0);
698 
699  if (e1000_init_hw(hw))
700  e_dev_err("Hardware Error\n");
701  e1000_update_mng_vlan(adapter);
702 
703  /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
704  if (hw->mac_type >= e1000_82544 &&
705  hw->autoneg == 1 &&
706  hw->autoneg_advertised == ADVERTISE_1000_FULL) {
707  u32 ctrl = er32(CTRL);
708  /* clear phy power management bit if we are in gig only mode,
709  * which if enabled will attempt negotiation to 100Mb, which
710  * can cause a loss of link at power off or driver unload */
711  ctrl &= ~E1000_CTRL_SWDPIN3;
712  ew32(CTRL, ctrl);
713  }
714 
715  /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
716  ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
717 
718  e1000_reset_adaptive(hw);
719  e1000_phy_get_info(hw, &adapter->phy_info);
720 
721  e1000_release_manageability(adapter);
722 }
723 
724 /* Dump the eeprom for users having checksum issues */
725 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
726 {
727  struct net_device *netdev = adapter->netdev;
728  struct ethtool_eeprom eeprom;
729  const struct ethtool_ops *ops = netdev->ethtool_ops;
730  u8 *data;
731  int i;
732  u16 csum_old, csum_new = 0;
733 
734  eeprom.len = ops->get_eeprom_len(netdev);
735  eeprom.offset = 0;
736 
737  data = kmalloc(eeprom.len, GFP_KERNEL);
738  if (!data)
739  return;
740 
741  ops->get_eeprom(netdev, &eeprom, data);
742 
743  csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744  (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745  for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746  csum_new += data[i] + (data[i + 1] << 8);
747  csum_new = EEPROM_SUM - csum_new;
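/* Editor's worked example (hedged; assumes EEPROM_CHECKSUM_REG == 0x3F and
 * EEPROM_SUM == 0xBABA): the EEPROM is valid when words 0x00-0x3F sum to
 * 0xBABA modulo 2^16, so the expected checksum word is 0xBABA minus the
 * 16-bit sum of words 0x00-0x3E.  If those words sum to 0x1234, csum_new
 * above becomes 0xBABA - 0x1234 = 0xA886, which is then compared against
 * the csum_old actually stored in the part.
 */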
748 
749  pr_err("/*********************/\n");
750  pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751  pr_err("Calculated : 0x%04x\n", csum_new);
752 
753  pr_err("Offset Values\n");
754  pr_err("======== ======\n");
755  print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756 
757  pr_err("Include this output when contacting your support provider.\n");
758  pr_err("This is not a software error! Something bad happened to\n");
759  pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760  pr_err("result in further problems, possibly loss of data,\n");
761  pr_err("corruption or system hangs!\n");
762  pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763  pr_err("which is invalid and requires you to set the proper MAC\n");
764  pr_err("address manually before continuing to enable this network\n");
765  pr_err("device. Please inspect the EEPROM dump and report the\n");
766  pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767  pr_err("/*********************/\n");
768 
769  kfree(data);
770 }
771 
778 static int e1000_is_need_ioport(struct pci_dev *pdev)
779 {
780  switch (pdev->device) {
802  return true;
803  default:
804  return false;
805  }
806 }
807 
808 static netdev_features_t e1000_fix_features(struct net_device *netdev,
809  netdev_features_t features)
810 {
811  /*
812  * Since there is no support for separate rx/tx vlan accel
813  * enable/disable make sure tx flag is always in same state as rx.
814  */
815  if (features & NETIF_F_HW_VLAN_RX)
816  features |= NETIF_F_HW_VLAN_TX;
817  else
818  features &= ~NETIF_F_HW_VLAN_TX;
819 
820  return features;
821 }
822 
823 static int e1000_set_features(struct net_device *netdev,
824  netdev_features_t features)
825 {
826  struct e1000_adapter *adapter = netdev_priv(netdev);
827  netdev_features_t changed = features ^ netdev->features;
828 
829  if (changed & NETIF_F_HW_VLAN_RX)
830  e1000_vlan_mode(netdev, features);
831 
832  if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
833  return 0;
834 
835  netdev->features = features;
836  adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
837 
838  if (netif_running(netdev))
839  e1000_reinit_locked(adapter);
840  else
841  e1000_reset(adapter);
842 
843  return 0;
844 }
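/* Editor's usage note (illustrative): the hardware has a single VLAN
 * acceleration enable, so e1000_fix_features() keeps the Tx flag slaved to
 * the Rx flag and "ethtool -K eth0 rxvlan off" turns txvlan off as well.
 * A pure VLAN toggle returns early after e1000_vlan_mode(); only RXCSUM or
 * RXALL changes fall through to the reinit/reset below.
 */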
845 
846 static const struct net_device_ops e1000_netdev_ops = {
847  .ndo_open = e1000_open,
848  .ndo_stop = e1000_close,
849  .ndo_start_xmit = e1000_xmit_frame,
850  .ndo_get_stats = e1000_get_stats,
851  .ndo_set_rx_mode = e1000_set_rx_mode,
852  .ndo_set_mac_address = e1000_set_mac,
853  .ndo_tx_timeout = e1000_tx_timeout,
854  .ndo_change_mtu = e1000_change_mtu,
855  .ndo_do_ioctl = e1000_ioctl,
856  .ndo_validate_addr = eth_validate_addr,
857  .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
858  .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
859 #ifdef CONFIG_NET_POLL_CONTROLLER
860  .ndo_poll_controller = e1000_netpoll,
861 #endif
862  .ndo_fix_features = e1000_fix_features,
863  .ndo_set_features = e1000_set_features,
864 };
865 
877 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
878  struct e1000_hw *hw)
879 {
880  struct pci_dev *pdev = adapter->pdev;
881 
882  /* PCI config space info */
883  hw->vendor_id = pdev->vendor;
884  hw->device_id = pdev->device;
885  hw->subsystem_vendor_id = pdev->subsystem_vendor;
886  hw->subsystem_id = pdev->subsystem_device;
887  hw->revision_id = pdev->revision;
888 
889  pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
890 
891  hw->max_frame_size = adapter->netdev->mtu +
892  ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
893  hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
894 
895  /* identify the MAC */
896  if (e1000_set_mac_type(hw)) {
897  e_err(probe, "Unknown MAC Type\n");
898  return -EIO;
899  }
900 
901  switch (hw->mac_type) {
902  default:
903  break;
904  case e1000_82541:
905  case e1000_82547:
906  case e1000_82541_rev_2:
907  case e1000_82547_rev_2:
908  hw->phy_init_script = 1;
909  break;
910  }
911 
912  e1000_set_media_type(hw);
913  e1000_get_bus_info(hw);
914 
915  hw->wait_autoneg_complete = false;
916  hw->tbi_compatibility_en = true;
917  hw->adaptive_ifs = true;
918 
919  /* Copper options */
920 
921  if (hw->media_type == e1000_media_type_copper) {
922  hw->mdix = AUTO_ALL_MODES;
923  hw->disable_polarity_correction = false;
924  hw->master_slave = E1000_MASTER_SLAVE;
925  }
926 
927  return 0;
928 }
929 
941 static int __devinit e1000_probe(struct pci_dev *pdev,
942  const struct pci_device_id *ent)
943 {
944  struct net_device *netdev;
945  struct e1000_adapter *adapter;
946  struct e1000_hw *hw;
947 
948  static int cards_found = 0;
949  static int global_quad_port_a = 0; /* global ksp3 port a indication */
950  int i, err, pci_using_dac;
951  u16 eeprom_data = 0;
952  u16 tmp = 0;
953  u16 eeprom_apme_mask = E1000_EEPROM_APME;
954  int bars, need_ioport;
955 
956  /* do not allocate ioport bars when not needed */
957  need_ioport = e1000_is_need_ioport(pdev);
958  if (need_ioport) {
959  bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
960  err = pci_enable_device(pdev);
961  } else {
962  bars = pci_select_bars(pdev, IORESOURCE_MEM);
963  err = pci_enable_device_mem(pdev);
964  }
965  if (err)
966  return err;
967 
968  err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
969  if (err)
970  goto err_pci_reg;
971 
972  pci_set_master(pdev);
973  err = pci_save_state(pdev);
974  if (err)
975  goto err_alloc_etherdev;
976 
977  err = -ENOMEM;
978  netdev = alloc_etherdev(sizeof(struct e1000_adapter));
979  if (!netdev)
980  goto err_alloc_etherdev;
981 
982  SET_NETDEV_DEV(netdev, &pdev->dev);
983 
984  pci_set_drvdata(pdev, netdev);
985  adapter = netdev_priv(netdev);
986  adapter->netdev = netdev;
987  adapter->pdev = pdev;
988  adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
989  adapter->bars = bars;
990  adapter->need_ioport = need_ioport;
991 
992  hw = &adapter->hw;
993  hw->back = adapter;
994 
995  err = -EIO;
996  hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
997  if (!hw->hw_addr)
998  goto err_ioremap;
999 
1000  if (adapter->need_ioport) {
1001  for (i = BAR_1; i <= BAR_5; i++) {
1002  if (pci_resource_len(pdev, i) == 0)
1003  continue;
1004  if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1005  hw->io_base = pci_resource_start(pdev, i);
1006  break;
1007  }
1008  }
1009  }
1010 
1011  /* make ready for any if (hw->...) below */
1012  err = e1000_init_hw_struct(adapter, hw);
1013  if (err)
1014  goto err_sw_init;
1015 
1016  /*
1017  * there is a workaround being applied below that limits
1018  * 64-bit DMA addresses to 64-bit hardware. There are some
1019  * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1020  */
1021  pci_using_dac = 0;
1022  if ((hw->bus_type == e1000_bus_type_pcix) &&
1023  !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1024  /*
1025  * according to DMA-API-HOWTO, coherent calls will always
1026  * succeed if the set call did
1027  */
1028  dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1029  pci_using_dac = 1;
1030  } else {
1031  err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1032  if (err) {
1033  pr_err("No usable DMA config, aborting\n");
1034  goto err_dma;
1035  }
1036  dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1037  }
1038 
1039  netdev->netdev_ops = &e1000_netdev_ops;
1040  e1000_set_ethtool_ops(netdev);
1041  netdev->watchdog_timeo = 5 * HZ;
1042  netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1043 
1044  strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1045 
1046  adapter->bd_number = cards_found;
1047 
1048  /* setup the private structure */
1049 
1050  err = e1000_sw_init(adapter);
1051  if (err)
1052  goto err_sw_init;
1053 
1054  err = -EIO;
1055  if (hw->mac_type == e1000_ce4100) {
1056  hw->ce4100_gbe_mdio_base_virt =
1057  ioremap(pci_resource_start(pdev, BAR_1),
1058  pci_resource_len(pdev, BAR_1));
1059 
1060  if (!hw->ce4100_gbe_mdio_base_virt)
1061  goto err_mdio_ioremap;
1062  }
1063 
1064  if (hw->mac_type >= e1000_82543) {
1065  netdev->hw_features = NETIF_F_SG |
1066  NETIF_F_HW_CSUM |
1067  NETIF_F_HW_VLAN_RX;
1068  netdev->features = NETIF_F_HW_VLAN_TX |
1069  NETIF_F_HW_VLAN_FILTER;
1070  }
1071 
1072  if ((hw->mac_type >= e1000_82544) &&
1073  (hw->mac_type != e1000_82547))
1074  netdev->hw_features |= NETIF_F_TSO;
1075 
1076  netdev->priv_flags |= IFF_SUPP_NOFCS;
1077 
1078  netdev->features |= netdev->hw_features;
1079  netdev->hw_features |= (NETIF_F_RXCSUM |
1080  NETIF_F_RXALL |
1081  NETIF_F_RXFCS);
1082 
1083  if (pci_using_dac) {
1084  netdev->features |= NETIF_F_HIGHDMA;
1085  netdev->vlan_features |= NETIF_F_HIGHDMA;
1086  }
1087 
1088  netdev->vlan_features |= (NETIF_F_TSO |
1089  NETIF_F_HW_CSUM |
1090  NETIF_F_SG);
1091 
1092  netdev->priv_flags |= IFF_UNICAST_FLT;
1093 
1094  adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1095 
1096  /* initialize eeprom parameters */
1097  if (e1000_init_eeprom_params(hw)) {
1098  e_err(probe, "EEPROM initialization failed\n");
1099  goto err_eeprom;
1100  }
1101 
1102  /* before reading the EEPROM, reset the controller to
1103  * put the device in a known good starting state */
1104 
1105  e1000_reset_hw(hw);
1106 
1107  /* make sure the EEPROM is good */
1108  if (e1000_validate_eeprom_checksum(hw) < 0) {
1109  e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1110  e1000_dump_eeprom(adapter);
1111  /*
1112  * set MAC address to all zeroes to invalidate and temporary
1113  * disable this device for the user. This blocks regular
1114  * traffic while still permitting ethtool ioctls from reaching
1115  * the hardware as well as allowing the user to run the
1116  * interface after manually setting a hw addr using
1117  * `ip set address`
1118  */
1119  memset(hw->mac_addr, 0, netdev->addr_len);
1120  } else {
1121  /* copy the MAC address out of the EEPROM */
1122  if (e1000_read_mac_addr(hw))
1123  e_err(probe, "EEPROM Read Error\n");
1124  }
1125  /* don't block initialization here due to bad MAC address */
1126  memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1127  memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1128 
1129  if (!is_valid_ether_addr(netdev->perm_addr))
1130  e_err(probe, "Invalid MAC Address\n");
1131 
1132 
1133  INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1134  INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1135  e1000_82547_tx_fifo_stall_task);
1136  INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1137  INIT_WORK(&adapter->reset_task, e1000_reset_task);
1138 
1139  e1000_check_options(adapter);
1140 
1141  /* Initial Wake on LAN setting
1142  * If APM wake is enabled in the EEPROM,
1143  * enable the ACPI Magic Packet filter
1144  */
1145 
1146  switch (hw->mac_type) {
1147  case e1000_82542_rev2_0:
1148  case e1000_82542_rev2_1:
1149  case e1000_82543:
1150  break;
1151  case e1000_82544:
1152  e1000_read_eeprom(hw,
1153  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1154  eeprom_apme_mask = E1000_EEPROM_82544_APM;
1155  break;
1156  case e1000_82546:
1157  case e1000_82546_rev_3:
1158  if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1159  e1000_read_eeprom(hw,
1160  EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1160  EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1161  break;
1162  }
1163  /* Fall Through */
1164  default:
1165  e1000_read_eeprom(hw,
1166  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1167  break;
1168  }
1169  if (eeprom_data & eeprom_apme_mask)
1170  adapter->eeprom_wol |= E1000_WUFC_MAG;
1171 
1172  /* now that we have the eeprom settings, apply the special cases
1173  * where the eeprom may be wrong or the board simply won't support
1174  * wake on lan on a particular port */
1175  switch (pdev->device) {
1176  case E1000_DEV_ID_82546GB_PCIE:
1177  adapter->eeprom_wol = 0;
1178  break;
1179  case E1000_DEV_ID_82546EB_FIBER:
1180  case E1000_DEV_ID_82546GB_FIBER:
1181  /* Wake events only supported on port A for dual fiber
1182  * regardless of eeprom setting */
1183  if (er32(STATUS) & E1000_STATUS_FUNC_1)
1184  adapter->eeprom_wol = 0;
1185  break;
1186  case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1187  /* if quad port adapter, disable WoL on all but port A */
1188  if (global_quad_port_a != 0)
1189  adapter->eeprom_wol = 0;
1190  else
1191  adapter->quad_port_a = true;
1192  /* Reset for multiple quad port adapters */
1193  if (++global_quad_port_a == 4)
1194  global_quad_port_a = 0;
1195  break;
1196  }
1197 
1198  /* initialize the wol settings based on the eeprom settings */
1199  adapter->wol = adapter->eeprom_wol;
1200  device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1201 
1202  /* Auto detect PHY address */
1203  if (hw->mac_type == e1000_ce4100) {
1204  for (i = 0; i < 32; i++) {
1205  hw->phy_addr = i;
1206  e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1207  if (tmp == 0 || tmp == 0xFF) {
1208  if (i == 31)
1209  goto err_eeprom;
1210  continue;
1211  } else
1212  break;
1213  }
1214  }
1215 
1216  /* reset the hardware with the new settings */
1217  e1000_reset(adapter);
1218 
1219  strcpy(netdev->name, "eth%d");
1220  err = register_netdev(netdev);
1221  if (err)
1222  goto err_register;
1223 
1224  e1000_vlan_filter_on_off(adapter, false);
1225 
1226  /* print bus type/speed/width info */
1227  e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1228  ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1229  ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1230  (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1231  (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1232  (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1233  ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1234  netdev->dev_addr);
1235 
1236  /* carrier off reporting is important to ethtool even BEFORE open */
1237  netif_carrier_off(netdev);
1238 
1239  e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1240 
1241  cards_found++;
1242  return 0;
1243 
1244 err_register:
1245 err_eeprom:
1246  e1000_phy_hw_reset(hw);
1247 
1248  if (hw->flash_address)
1249  iounmap(hw->flash_address);
1250  kfree(adapter->tx_ring);
1251  kfree(adapter->rx_ring);
1252 err_dma:
1253 err_sw_init:
1254 err_mdio_ioremap:
1255  iounmap(hw->ce4100_gbe_mdio_base_virt);
1256  iounmap(hw->hw_addr);
1257 err_ioremap:
1258  free_netdev(netdev);
1259 err_alloc_etherdev:
1260  pci_release_selected_regions(pdev, bars);
1261 err_pci_reg:
1262  pci_disable_device(pdev);
1263  return err;
1264 }
1265 
1276 static void __devexit e1000_remove(struct pci_dev *pdev)
1277 {
1278  struct net_device *netdev = pci_get_drvdata(pdev);
1279  struct e1000_adapter *adapter = netdev_priv(netdev);
1280  struct e1000_hw *hw = &adapter->hw;
1281 
1282  e1000_down_and_stop(adapter);
1283  e1000_release_manageability(adapter);
1284 
1285  unregister_netdev(netdev);
1286 
1287  e1000_phy_hw_reset(hw);
1288 
1289  kfree(adapter->tx_ring);
1290  kfree(adapter->rx_ring);
1291 
1292  if (hw->mac_type == e1000_ce4100)
1293  iounmap(hw->ce4100_gbe_mdio_base_virt);
1294  iounmap(hw->hw_addr);
1295  if (hw->flash_address)
1296  iounmap(hw->flash_address);
1297  pci_release_selected_regions(pdev, adapter->bars);
1298 
1299  free_netdev(netdev);
1300 
1301  pci_disable_device(pdev);
1302 }
1303 
1312 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1313 {
1314  adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1315 
1316  adapter->num_tx_queues = 1;
1317  adapter->num_rx_queues = 1;
1318 
1319  if (e1000_alloc_queues(adapter)) {
1320  e_err(probe, "Unable to allocate memory for queues\n");
1321  return -ENOMEM;
1322  }
1323 
1324  /* Explicitly disable IRQ since the NIC can be in any state. */
1325  e1000_irq_disable(adapter);
1326 
1327  spin_lock_init(&adapter->stats_lock);
1328  mutex_init(&adapter->mutex);
1329 
1330  set_bit(__E1000_DOWN, &adapter->flags);
1331 
1332  return 0;
1333 }
1334 
1343 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1344 {
1345  adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1346  sizeof(struct e1000_tx_ring), GFP_KERNEL);
1347  if (!adapter->tx_ring)
1348  return -ENOMEM;
1349 
1350  adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1351  sizeof(struct e1000_rx_ring), GFP_KERNEL);
1352  if (!adapter->rx_ring) {
1353  kfree(adapter->tx_ring);
1354  return -ENOMEM;
1355  }
1356 
1357  return E1000_SUCCESS;
1358 }
1359 
1373 static int e1000_open(struct net_device *netdev)
1374 {
1375  struct e1000_adapter *adapter = netdev_priv(netdev);
1376  struct e1000_hw *hw = &adapter->hw;
1377  int err;
1378 
1379  /* disallow open during test */
1380  if (test_bit(__E1000_TESTING, &adapter->flags))
1381  return -EBUSY;
1382 
1383  netif_carrier_off(netdev);
1384 
1385  /* allocate transmit descriptors */
1386  err = e1000_setup_all_tx_resources(adapter);
1387  if (err)
1388  goto err_setup_tx;
1389 
1390  /* allocate receive descriptors */
1391  err = e1000_setup_all_rx_resources(adapter);
1392  if (err)
1393  goto err_setup_rx;
1394 
1395  e1000_power_up_phy(adapter);
1396 
1397  adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1398  if ((hw->mng_cookie.status &
1399  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1400  e1000_update_mng_vlan(adapter);
1401  }
1402 
1403  /* before we allocate an interrupt, we must be ready to handle it.
1404  * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1405  * as soon as we call pci_request_irq, so we have to setup our
1406  * clean_rx handler before we do so. */
1407  e1000_configure(adapter);
1408 
1409  err = e1000_request_irq(adapter);
1410  if (err)
1411  goto err_req_irq;
1412 
1413  /* From here on the code is the same as e1000_up() */
1414  clear_bit(__E1000_DOWN, &adapter->flags);
1415 
1416  napi_enable(&adapter->napi);
1417 
1418  e1000_irq_enable(adapter);
1419 
1420  netif_start_queue(netdev);
1421 
1422  /* fire a link status change interrupt to start the watchdog */
1424 
1425  return E1000_SUCCESS;
1426 
1427 err_req_irq:
1428  e1000_power_down_phy(adapter);
1429  e1000_free_all_rx_resources(adapter);
1430 err_setup_rx:
1431  e1000_free_all_tx_resources(adapter);
1432 err_setup_tx:
1433  e1000_reset(adapter);
1434 
1435  return err;
1436 }
1437 
1450 static int e1000_close(struct net_device *netdev)
1451 {
1452  struct e1000_adapter *adapter = netdev_priv(netdev);
1453  struct e1000_hw *hw = &adapter->hw;
1454 
1455  WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1456  e1000_down(adapter);
1457  e1000_power_down_phy(adapter);
1458  e1000_free_irq(adapter);
1459 
1460  e1000_free_all_tx_resources(adapter);
1461  e1000_free_all_rx_resources(adapter);
1462 
1463  /* kill manageability vlan ID if supported, but not if a vlan with
1464  * the same ID is registered on the host OS (let 8021q kill it) */
1465  if ((hw->mng_cookie.status &
1466  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1467  !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1468  e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1469  }
1470 
1471  return 0;
1472 }
1473 
1480 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481  unsigned long len)
1482 {
1483  struct e1000_hw *hw = &adapter->hw;
1484  unsigned long begin = (unsigned long)start;
1485  unsigned long end = begin + len;
1486 
1487  /* First rev 82545 and 82546 need to not allow any memory
1488  * write location to cross 64k boundary due to errata 23 */
1489  if (hw->mac_type == e1000_82545 ||
1490  hw->mac_type == e1000_ce4100 ||
1491  hw->mac_type == e1000_82546) {
1492  return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1493  }
1494 
1495  return true;
1496 }
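/* Editor's worked example (hedged, not upstream code): the XOR test above
 * flags any buffer whose first and last bytes land in different 64 KiB
 * regions.  With begin = 0x0FFF0 and len = 0x20, end - 1 = 0x1000F and
 * (0x0FFF0 ^ 0x1000F) >> 16 == 1, so the function returns false and the
 * ring-setup code re-allocates the descriptors (errata 23 workaround).
 */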
1497 
1506 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1507  struct e1000_tx_ring *txdr)
1508 {
1509  struct pci_dev *pdev = adapter->pdev;
1510  int size;
1511 
1512  size = sizeof(struct e1000_buffer) * txdr->count;
1513  txdr->buffer_info = vzalloc(size);
1514  if (!txdr->buffer_info) {
1515  e_err(probe, "Unable to allocate memory for the Tx descriptor "
1516  "ring\n");
1517  return -ENOMEM;
1518  }
1519 
1520  /* round up to nearest 4K */
1521 
1522  txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1523  txdr->size = ALIGN(txdr->size, 4096);
1524 
1525  txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1526  GFP_KERNEL);
1527  if (!txdr->desc) {
1528 setup_tx_desc_die:
1529  vfree(txdr->buffer_info);
1530  e_err(probe, "Unable to allocate memory for the Tx descriptor "
1531  "ring\n");
1532  return -ENOMEM;
1533  }
1534 
1535  /* Fix for errata 23, can't cross 64kB boundary */
1536  if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1537  void *olddesc = txdr->desc;
1538  dma_addr_t olddma = txdr->dma;
1539  e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1540  txdr->size, txdr->desc);
1541  /* Try again, without freeing the previous */
1542  txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1543  &txdr->dma, GFP_KERNEL);
1544  /* Failed allocation, critical failure */
1545  if (!txdr->desc) {
1546  dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547  olddma);
1548  goto setup_tx_desc_die;
1549  }
1550 
1551  if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1552  /* give up */
1553  dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1554  txdr->dma);
1555  dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1556  olddma);
1557  e_err(probe, "Unable to allocate aligned memory "
1558  "for the transmit descriptor ring\n");
1559  vfree(txdr->buffer_info);
1560  return -ENOMEM;
1561  } else {
1562  /* Free old allocation, new allocation was successful */
1563  dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1564  olddma);
1565  }
1566  }
1567  memset(txdr->desc, 0, txdr->size);
1568 
1569  txdr->next_to_use = 0;
1570  txdr->next_to_clean = 0;
1571 
1572  return 0;
1573 }
1574 
1583 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1584 {
1585  int i, err = 0;
1586 
1587  for (i = 0; i < adapter->num_tx_queues; i++) {
1588  err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1589  if (err) {
1590  e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1591  for (i-- ; i >= 0; i--)
1592  e1000_free_tx_resources(adapter,
1593  &adapter->tx_ring[i]);
1594  break;
1595  }
1596  }
1597 
1598  return err;
1599 }
1600 
1608 static void e1000_configure_tx(struct e1000_adapter *adapter)
1609 {
1610  u64 tdba;
1611  struct e1000_hw *hw = &adapter->hw;
1612  u32 tdlen, tctl, tipg;
1613  u32 ipgr1, ipgr2;
1614 
1615  /* Setup the HW Tx Head and Tail descriptor pointers */
1616 
1617  switch (adapter->num_tx_queues) {
1618  case 1:
1619  default:
1620  tdba = adapter->tx_ring[0].dma;
1621  tdlen = adapter->tx_ring[0].count *
1622  sizeof(struct e1000_tx_desc);
1623  ew32(TDLEN, tdlen);
1624  ew32(TDBAH, (tdba >> 32));
1625  ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1626  ew32(TDT, 0);
1627  ew32(TDH, 0);
1628  adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1629  adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1630  break;
1631  }
1632 
1633  /* Set the default values for the Tx Inter Packet Gap timer */
1634  if ((hw->media_type == e1000_media_type_fiber ||
1635  hw->media_type == e1000_media_type_internal_serdes))
1636  tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1637  else
1638  tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1639 
1640  switch (hw->mac_type) {
1641  case e1000_82542_rev2_0:
1642  case e1000_82542_rev2_1:
1643  tipg = DEFAULT_82542_TIPG_IPGT;
1644  ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1645  ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1646  break;
1647  default:
1648  ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1649  ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1650  break;
1651  }
1652  tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1653  tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1654  ew32(TIPG, tipg);
1655 
1656  /* Set the Tx Interrupt Delay register */
1657 
1658  ew32(TIDV, adapter->tx_int_delay);
1659  if (hw->mac_type >= e1000_82540)
1660  ew32(TADV, adapter->tx_abs_int_delay);
1661 
1662  /* Program the Transmit Control Register */
1663 
1664  tctl = er32(TCTL);
1665  tctl &= ~E1000_TCTL_CT;
1666  tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1667  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1668 
1669  e1000_config_collision_dist(hw);
1670 
1671  /* Setup Transmit Descriptor Settings for eop descriptor */
1672  adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1673 
1674  /* only set IDE if we are delaying interrupts using the timers */
1675  if (adapter->tx_int_delay)
1676  adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1677 
1678  if (hw->mac_type < e1000_82543)
1679  adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1680  else
1681  adapter->txd_cmd |= E1000_TXD_CMD_RS;
1682 
1683  /* Cache if we're 82544 running in PCI-X because we'll
1684  * need this to apply a workaround later in the send path. */
1685  if (hw->mac_type == e1000_82544 &&
1686  hw->bus_type == e1000_bus_type_pcix)
1687  adapter->pcix_82544 = true;
1688 
1689  ew32(TCTL, tctl);
1690 
1691 }
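/* Editor's note (illustrative): TDBAL/TDBAH/TDLEN describe the descriptor
 * ring in memory, while TDH (head) and TDT (tail) implement the
 * producer/consumer handshake: the driver writes new descriptors at TDT and
 * bumps the tail, the MAC fetches from TDH and advances it as frames go
 * out, and TDH == TDT means the ring is idle.
 */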
1692 
1701 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1702  struct e1000_rx_ring *rxdr)
1703 {
1704  struct pci_dev *pdev = adapter->pdev;
1705  int size, desc_len;
1706 
1707  size = sizeof(struct e1000_buffer) * rxdr->count;
1708  rxdr->buffer_info = vzalloc(size);
1709  if (!rxdr->buffer_info) {
1710  e_err(probe, "Unable to allocate memory for the Rx descriptor "
1711  "ring\n");
1712  return -ENOMEM;
1713  }
1714 
1715  desc_len = sizeof(struct e1000_rx_desc);
1716 
1717  /* Round up to nearest 4K */
1718 
1719  rxdr->size = rxdr->count * desc_len;
1720  rxdr->size = ALIGN(rxdr->size, 4096);
1721 
1722  rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1723  GFP_KERNEL);
1724 
1725  if (!rxdr->desc) {
1726  e_err(probe, "Unable to allocate memory for the Rx descriptor "
1727  "ring\n");
1728 setup_rx_desc_die:
1729  vfree(rxdr->buffer_info);
1730  return -ENOMEM;
1731  }
1732 
1733  /* Fix for errata 23, can't cross 64kB boundary */
1734  if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1735  void *olddesc = rxdr->desc;
1736  dma_addr_t olddma = rxdr->dma;
1737  e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1738  rxdr->size, rxdr->desc);
1739  /* Try again, without freeing the previous */
1740  rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1741  &rxdr->dma, GFP_KERNEL);
1742  /* Failed allocation, critical failure */
1743  if (!rxdr->desc) {
1744  dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1745  olddma);
1746  e_err(probe, "Unable to allocate memory for the Rx "
1747  "descriptor ring\n");
1748  goto setup_rx_desc_die;
1749  }
1750 
1751  if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1752  /* give up */
1753  dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1754  rxdr->dma);
1755  dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1756  olddma);
1757  e_err(probe, "Unable to allocate aligned memory for "
1758  "the Rx descriptor ring\n");
1759  goto setup_rx_desc_die;
1760  } else {
1761  /* Free old allocation, new allocation was successful */
1762  dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1763  olddma);
1764  }
1765  }
1766  memset(rxdr->desc, 0, rxdr->size);
1767 
1768  rxdr->next_to_clean = 0;
1769  rxdr->next_to_use = 0;
1770  rxdr->rx_skb_top = NULL;
1771 
1772  return 0;
1773 }
1774 
1783 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1784 {
1785  int i, err = 0;
1786 
1787  for (i = 0; i < adapter->num_rx_queues; i++) {
1788  err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1789  if (err) {
1790  e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1791  for (i-- ; i >= 0; i--)
1792  e1000_free_rx_resources(adapter,
1793  &adapter->rx_ring[i]);
1794  break;
1795  }
1796  }
1797 
1798  return err;
1799 }
1800 
1805 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1806 {
1807  struct e1000_hw *hw = &adapter->hw;
1808  u32 rctl;
1809 
1810  rctl = er32(RCTL);
1811 
1812  rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1813 
1814  rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1815  E1000_RCTL_RDMTS_HALF |
1816  (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1817 
1818  if (hw->tbi_compatibility_on == 1)
1819  rctl |= E1000_RCTL_SBP;
1820  else
1821  rctl &= ~E1000_RCTL_SBP;
1822 
1823  if (adapter->netdev->mtu <= ETH_DATA_LEN)
1824  rctl &= ~E1000_RCTL_LPE;
1825  else
1826  rctl |= E1000_RCTL_LPE;
1827 
1828  /* Setup buffer sizes */
1829  rctl &= ~E1000_RCTL_SZ_4096;
1830  rctl |= E1000_RCTL_BSEX;
1831  switch (adapter->rx_buffer_len) {
1832  case E1000_RXBUFFER_2048:
1833  default:
1834  rctl |= E1000_RCTL_SZ_2048;
1835  rctl &= ~E1000_RCTL_BSEX;
1836  break;
1837  case E1000_RXBUFFER_4096:
1838  rctl |= E1000_RCTL_SZ_4096;
1839  break;
1840  case E1000_RXBUFFER_8192:
1841  rctl |= E1000_RCTL_SZ_8192;
1842  break;
1843  case E1000_RXBUFFER_16384:
1844  rctl |= E1000_RCTL_SZ_16384;
1845  break;
1846  }
1847 
1848  /* This is useful for sniffing bad packets. */
1849  if (adapter->netdev->features & NETIF_F_RXALL) {
1850  /* UPE and MPE will be handled by normal PROMISC logic
1851  * in e1000_set_rx_mode */
1852  rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1853  E1000_RCTL_BAM | /* RX All Bcast Pkts */
1854  E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1855 
1856  rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1857  E1000_RCTL_DPF | /* Allow filtered pause */
1858  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1859  /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1860  * and that breaks VLANs.
1861  */
1862  }
1863 
1864  ew32(RCTL, rctl);
1865 }
1866 
1874 static void e1000_configure_rx(struct e1000_adapter *adapter)
1875 {
1876  u64 rdba;
1877  struct e1000_hw *hw = &adapter->hw;
1878  u32 rdlen, rctl, rxcsum;
1879 
1880  if (adapter->netdev->mtu > ETH_DATA_LEN) {
1881  rdlen = adapter->rx_ring[0].count *
1882  sizeof(struct e1000_rx_desc);
1883  adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1884  adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1885  } else {
1886  rdlen = adapter->rx_ring[0].count *
1887  sizeof(struct e1000_rx_desc);
1888  adapter->clean_rx = e1000_clean_rx_irq;
1889  adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1890  }
1891 
1892  /* disable receives while setting up the descriptors */
1893  rctl = er32(RCTL);
1894  ew32(RCTL, rctl & ~E1000_RCTL_EN);
1895 
1896  /* set the Receive Delay Timer Register */
1897  ew32(RDTR, adapter->rx_int_delay);
1898 
1899  if (hw->mac_type >= e1000_82540) {
1900  ew32(RADV, adapter->rx_abs_int_delay);
1901  if (adapter->itr_setting != 0)
1902  ew32(ITR, 1000000000 / (adapter->itr * 256));
1903  }
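/* Editor's worked example (hedged): the ITR register counts in 256 ns
 * units, so with adapter->itr = 8000 interrupts/sec the value written is
 * 1000000000 / (8000 * 256) = 488, i.e. the MAC will not raise interrupts
 * closer together than about 125 us (~8000 per second).
 */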
1904 
1905  /* Setup the HW Rx Head and Tail Descriptor Pointers and
1906  * the Base and Length of the Rx Descriptor Ring */
1907  switch (adapter->num_rx_queues) {
1908  case 1:
1909  default:
1910  rdba = adapter->rx_ring[0].dma;
1911  ew32(RDLEN, rdlen);
1912  ew32(RDBAH, (rdba >> 32));
1913  ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1914  ew32(RDT, 0);
1915  ew32(RDH, 0);
1916  adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1917  adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1918  break;
1919  }
1920 
1921  /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1922  if (hw->mac_type >= e1000_82543) {
1923  rxcsum = er32(RXCSUM);
1924  if (adapter->rx_csum)
1925  rxcsum |= E1000_RXCSUM_TUOFL;
1926  else
1927  /* don't need to clear IPPCSE as it defaults to 0 */
1928  rxcsum &= ~E1000_RXCSUM_TUOFL;
1929  ew32(RXCSUM, rxcsum);
1930  }
1931 
1932  /* Enable Receives */
1933  ew32(RCTL, rctl | E1000_RCTL_EN);
1934 }
1935 
1944 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1945  struct e1000_tx_ring *tx_ring)
1946 {
1947  struct pci_dev *pdev = adapter->pdev;
1948 
1949  e1000_clean_tx_ring(adapter, tx_ring);
1950 
1951  vfree(tx_ring->buffer_info);
1952  tx_ring->buffer_info = NULL;
1953 
1954  dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1955  tx_ring->dma);
1956 
1957  tx_ring->desc = NULL;
1958 }
1959 
1967 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1968 {
1969  int i;
1970 
1971  for (i = 0; i < adapter->num_tx_queues; i++)
1972  e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1973 }
1974 
1975 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1976  struct e1000_buffer *buffer_info)
1977 {
1978  if (buffer_info->dma) {
1979  if (buffer_info->mapped_as_page)
1980  dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1981  buffer_info->length, DMA_TO_DEVICE);
1982  else
1983  dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1984  buffer_info->length,
1985  DMA_TO_DEVICE);
1986  buffer_info->dma = 0;
1987  }
1988  if (buffer_info->skb) {
1989  dev_kfree_skb_any(buffer_info->skb);
1990  buffer_info->skb = NULL;
1991  }
1992  buffer_info->time_stamp = 0;
1993  /* buffer_info must be completely set up in the transmit path */
1994 }
1995 
2002 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2003  struct e1000_tx_ring *tx_ring)
2004 {
2005  struct e1000_hw *hw = &adapter->hw;
2006  struct e1000_buffer *buffer_info;
2007  unsigned long size;
2008  unsigned int i;
2009 
2010  /* Free all the Tx ring sk_buffs */
2011 
2012  for (i = 0; i < tx_ring->count; i++) {
2013  buffer_info = &tx_ring->buffer_info[i];
2014  e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2015  }
2016 
2017  netdev_reset_queue(adapter->netdev);
2018  size = sizeof(struct e1000_buffer) * tx_ring->count;
2019  memset(tx_ring->buffer_info, 0, size);
2020 
2021  /* Zero out the descriptor ring */
2022 
2023  memset(tx_ring->desc, 0, tx_ring->size);
2024 
2025  tx_ring->next_to_use = 0;
2026  tx_ring->next_to_clean = 0;
2027  tx_ring->last_tx_tso = false;
2028 
2029  writel(0, hw->hw_addr + tx_ring->tdh);
2030  writel(0, hw->hw_addr + tx_ring->tdt);
2031 }
2032 
2038 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2039 {
2040  int i;
2041 
2042  for (i = 0; i < adapter->num_tx_queues; i++)
2043  e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2044 }
2045 
2054 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2055  struct e1000_rx_ring *rx_ring)
2056 {
2057  struct pci_dev *pdev = adapter->pdev;
2058 
2059  e1000_clean_rx_ring(adapter, rx_ring);
2060 
2061  vfree(rx_ring->buffer_info);
2062  rx_ring->buffer_info = NULL;
2063 
2064  dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2065  rx_ring->dma);
2066 
2067  rx_ring->desc = NULL;
2068 }
2069 
2077 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2078 {
2079  int i;
2080 
2081  for (i = 0; i < adapter->num_rx_queues; i++)
2082  e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2083 }
2084 
2091 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2092  struct e1000_rx_ring *rx_ring)
2093 {
2094  struct e1000_hw *hw = &adapter->hw;
2095  struct e1000_buffer *buffer_info;
2096  struct pci_dev *pdev = adapter->pdev;
2097  unsigned long size;
2098  unsigned int i;
2099 
2100  /* Free all the Rx ring sk_buffs */
2101  for (i = 0; i < rx_ring->count; i++) {
2102  buffer_info = &rx_ring->buffer_info[i];
2103  if (buffer_info->dma &&
2104  adapter->clean_rx == e1000_clean_rx_irq) {
2105  dma_unmap_single(&pdev->dev, buffer_info->dma,
2106  buffer_info->length,
2107  DMA_FROM_DEVICE);
2108  } else if (buffer_info->dma &&
2109  adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2110  dma_unmap_page(&pdev->dev, buffer_info->dma,
2111  buffer_info->length,
2112  DMA_FROM_DEVICE);
2113  }
2114 
2115  buffer_info->dma = 0;
2116  if (buffer_info->page) {
2117  put_page(buffer_info->page);
2118  buffer_info->page = NULL;
2119  }
2120  if (buffer_info->skb) {
2121  dev_kfree_skb(buffer_info->skb);
2122  buffer_info->skb = NULL;
2123  }
2124  }
2125 
2126  /* there also may be some cached data from a chained receive */
2127  if (rx_ring->rx_skb_top) {
2128  dev_kfree_skb(rx_ring->rx_skb_top);
2129  rx_ring->rx_skb_top = NULL;
2130  }
2131 
2132  size = sizeof(struct e1000_buffer) * rx_ring->count;
2133  memset(rx_ring->buffer_info, 0, size);
2134 
2135  /* Zero out the descriptor ring */
2136  memset(rx_ring->desc, 0, rx_ring->size);
2137 
2138  rx_ring->next_to_clean = 0;
2139  rx_ring->next_to_use = 0;
2140 
2141  writel(0, hw->hw_addr + rx_ring->rdh);
2142  writel(0, hw->hw_addr + rx_ring->rdt);
2143 }
2144 
2150 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2151 {
2152  int i;
2153 
2154  for (i = 0; i < adapter->num_rx_queues; i++)
2155  e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2156 }
2157 
2158 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2159  * and memory write and invalidate disabled for certain operations
2160  */
2161 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2162 {
2163  struct e1000_hw *hw = &adapter->hw;
2164  struct net_device *netdev = adapter->netdev;
2165  u32 rctl;
2166 
2167  e1000_pci_clear_mwi(hw);
2168 
2169  rctl = er32(RCTL);
2170  rctl |= E1000_RCTL_RST;
2171  ew32(RCTL, rctl);
2172  E1000_WRITE_FLUSH();
2173  mdelay(5);
2174 
2175  if (netif_running(netdev))
2176  e1000_clean_all_rx_rings(adapter);
2177 }
2178 
2179 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2180 {
2181  struct e1000_hw *hw = &adapter->hw;
2182  struct net_device *netdev = adapter->netdev;
2183  u32 rctl;
2184 
2185  rctl = er32(RCTL);
2186  rctl &= ~E1000_RCTL_RST;
2187  ew32(RCTL, rctl);
2188  E1000_WRITE_FLUSH();
2189  mdelay(5);
2190 
2191  if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2192  e1000_pci_set_mwi(hw);
2193 
2194  if (netif_running(netdev)) {
2195  /* No need to loop, because 82542 supports only 1 queue */
2196  struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2197  e1000_configure_rx(adapter);
2198  adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2199  }
2200 }
2201 
2210 static int e1000_set_mac(struct net_device *netdev, void *p)
2211 {
2212  struct e1000_adapter *adapter = netdev_priv(netdev);
2213  struct e1000_hw *hw = &adapter->hw;
2214  struct sockaddr *addr = p;
2215 
2216  if (!is_valid_ether_addr(addr->sa_data))
2217  return -EADDRNOTAVAIL;
2218 
2219  /* 82542 2.0 needs to be in reset to write receive address registers */
2220 
2221  if (hw->mac_type == e1000_82542_rev2_0)
2222  e1000_enter_82542_rst(adapter);
2223 
2224  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2225  memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2226 
2227  e1000_rar_set(hw, hw->mac_addr, 0);
2228 
2229  if (hw->mac_type == e1000_82542_rev2_0)
2230  e1000_leave_82542_rst(adapter);
2231 
2232  return 0;
2233 }
2234 
2245 static void e1000_set_rx_mode(struct net_device *netdev)
2246 {
2247  struct e1000_adapter *adapter = netdev_priv(netdev);
2248  struct e1000_hw *hw = &adapter->hw;
2249  struct netdev_hw_addr *ha;
2250  bool use_uc = false;
2251  u32 rctl;
2252  u32 hash_value;
2253  int i, rar_entries = E1000_RAR_ENTRIES;
2254  int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2255  u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2256 
2257  if (!mcarray) {
2258  e_err(probe, "memory allocation failed\n");
2259  return;
2260  }
2261 
2262  /* Check for Promiscuous and All Multicast modes */
2263 
2264  rctl = er32(RCTL);
2265 
2266  if (netdev->flags & IFF_PROMISC) {
2267  rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2268  rctl &= ~E1000_RCTL_VFE;
2269  } else {
2270  if (netdev->flags & IFF_ALLMULTI)
2271  rctl |= E1000_RCTL_MPE;
2272  else
2273  rctl &= ~E1000_RCTL_MPE;
2274  /* Enable VLAN filter if there is a VLAN */
2275  if (e1000_vlan_used(adapter))
2276  rctl |= E1000_RCTL_VFE;
2277  }
2278 
2279  if (netdev_uc_count(netdev) > rar_entries - 1) {
2280  rctl |= E1000_RCTL_UPE;
2281  } else if (!(netdev->flags & IFF_PROMISC)) {
2282  rctl &= ~E1000_RCTL_UPE;
2283  use_uc = true;
2284  }
2285 
2286  ew32(RCTL, rctl);
2287 
2288  /* 82542 2.0 needs to be in reset to write receive address registers */
2289 
2290  if (hw->mac_type == e1000_82542_rev2_0)
2291  e1000_enter_82542_rst(adapter);
2292 
2293  /* load the first 14 addresses into the exact filters 1-14. Unicast
2294  * addresses take precedence to avoid disabling unicast filtering
2295  * when possible.
2296  *
2297  * RAR 0 is used for the station MAC address
2298  * if there are not 14 addresses, go ahead and clear the filters
2299  */
2300  i = 1;
2301  if (use_uc)
2302  netdev_for_each_uc_addr(ha, netdev) {
2303  if (i == rar_entries)
2304  break;
2305  e1000_rar_set(hw, ha->addr, i++);
2306  }
2307 
2308  netdev_for_each_mc_addr(ha, netdev) {
2309  if (i == rar_entries) {
2310  /* load any remaining addresses into the hash table */
2311  u32 hash_reg, hash_bit, mta;
2312  hash_value = e1000_hash_mc_addr(hw, ha->addr);
2313  hash_reg = (hash_value >> 5) & 0x7F;
2314  hash_bit = hash_value & 0x1F;
2315  mta = (1 << hash_bit);
2316  mcarray[hash_reg] |= mta;
2317  } else {
2318  e1000_rar_set(hw, ha->addr, i++);
2319  }
2320  }
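 	/* Worked example of the hash indexing above, using a hypothetical
 	 * hash_value of 0x0ABC: hash_reg = (0x0ABC >> 5) & 0x7F = 0x55 and
 	 * hash_bit = 0x0ABC & 0x1F = 0x1C, so bit 28 of mcarray[0x55] is
 	 * set before the table is written to the MTA registers below.
 	 */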
2321 
2322  for (; i < rar_entries; i++) {
2323  E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2324  E1000_WRITE_FLUSH();
2325  E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2326  E1000_WRITE_FLUSH();
2327  }
2328 
2329  /* write the hash table completely, write from bottom to avoid
2330  * both stupid write combining chipsets, and flushing each write */
2331  for (i = mta_reg_count - 1; i >= 0 ; i--) {
2332  /*
2333  * The 82544 has an erratum where writing odd
2334  * offsets overwrites the previous even offset; writing
2335  * backwards over the range works around the issue by always
2336  * writing the odd offset first.
2337  */
2338  E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2339  }
2340  E1000_WRITE_FLUSH();
2341 
2342  if (hw->mac_type == e1000_82542_rev2_0)
2343  e1000_leave_82542_rst(adapter);
2344 
2345  kfree(mcarray);
2346 }
2347 
2355 static void e1000_update_phy_info_task(struct work_struct *work)
2356 {
2357  struct e1000_adapter *adapter = container_of(work,
2358  struct e1000_adapter,
2359  phy_info_task.work);
2360  if (test_bit(__E1000_DOWN, &adapter->flags))
2361  return;
2362  mutex_lock(&adapter->mutex);
2363  e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2364  mutex_unlock(&adapter->mutex);
2365 }
2366 
2371 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2372 {
2373  struct e1000_adapter *adapter = container_of(work,
2374  struct e1000_adapter,
2375  fifo_stall_task.work);
2376  struct e1000_hw *hw = &adapter->hw;
2377  struct net_device *netdev = adapter->netdev;
2378  u32 tctl;
2379 
2380  if (test_bit(__E1000_DOWN, &adapter->flags))
2381  return;
2382  mutex_lock(&adapter->mutex);
2383  if (atomic_read(&adapter->tx_fifo_stall)) {
2384  if ((er32(TDT) == er32(TDH)) &&
2385  (er32(TDFT) == er32(TDFH)) &&
2386  (er32(TDFTS) == er32(TDFHS))) {
2387  tctl = er32(TCTL);
2388  ew32(TCTL, tctl & ~E1000_TCTL_EN);
2389  ew32(TDFT, adapter->tx_head_addr);
2390  ew32(TDFH, adapter->tx_head_addr);
2391  ew32(TDFTS, adapter->tx_head_addr);
2392  ew32(TDFHS, adapter->tx_head_addr);
2393  ew32(TCTL, tctl);
2394  E1000_WRITE_FLUSH();
2395 
2396  adapter->tx_fifo_head = 0;
2397  atomic_set(&adapter->tx_fifo_stall, 0);
2398  netif_wake_queue(netdev);
2399  } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2400  schedule_delayed_work(&adapter->fifo_stall_task, 1);
2401  }
2402  }
2403  mutex_unlock(&adapter->mutex);
2404 }
2405 
2406 bool e1000_has_link(struct e1000_adapter *adapter)
2407 {
2408  struct e1000_hw *hw = &adapter->hw;
2409  bool link_active = false;
2410 
2411  /* get_link_status is set on LSC (link status) interrupt or rx
2412  * sequence error interrupt (except on intel ce4100).
2413  * get_link_status will stay false until the
2414  * e1000_check_for_link establishes link for copper adapters
2415  * ONLY
2416  */
2417  switch (hw->media_type) {
2418  case e1000_media_type_copper:
2419  if (hw->mac_type == e1000_ce4100)
2420  hw->get_link_status = 1;
2421  if (hw->get_link_status) {
2422  e1000_check_for_link(hw);
2423  link_active = !hw->get_link_status;
2424  } else {
2425  link_active = true;
2426  }
2427  break;
2428  case e1000_media_type_fiber:
2429  e1000_check_for_link(hw);
2430  link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2431  break;
2432  case e1000_media_type_internal_serdes:
2433  e1000_check_for_link(hw);
2434  link_active = hw->serdes_has_link;
2435  break;
2436  default:
2437  break;
2438  }
2439 
2440  return link_active;
2441 }
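/* Note: e1000_has_link() is called from the watchdog task below, which is
 * scheduled both periodically (every 2 * HZ) and from the interrupt handler
 * on LSC/RXSEQ events, rather than being driven purely by the interrupt.
 */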
2442 
2447 static void e1000_watchdog(struct work_struct *work)
2448 {
2449  struct e1000_adapter *adapter = container_of(work,
2450  struct e1000_adapter,
2451  watchdog_task.work);
2452  struct e1000_hw *hw = &adapter->hw;
2453  struct net_device *netdev = adapter->netdev;
2454  struct e1000_tx_ring *txdr = adapter->tx_ring;
2455  u32 link, tctl;
2456 
2457  if (test_bit(__E1000_DOWN, &adapter->flags))
2458  return;
2459 
2460  mutex_lock(&adapter->mutex);
2461  link = e1000_has_link(adapter);
2462  if ((netif_carrier_ok(netdev)) && link)
2463  goto link_up;
2464 
2465  if (link) {
2466  if (!netif_carrier_ok(netdev)) {
2467  u32 ctrl;
2468  bool txb2b = true;
2469  /* update snapshot of PHY registers on LSC */
2470  e1000_get_speed_and_duplex(hw,
2471  &adapter->link_speed,
2472  &adapter->link_duplex);
2473 
2474  ctrl = er32(CTRL);
2475  pr_info("%s NIC Link is Up %d Mbps %s, "
2476  "Flow Control: %s\n",
2477  netdev->name,
2478  adapter->link_speed,
2479  adapter->link_duplex == FULL_DUPLEX ?
2480  "Full Duplex" : "Half Duplex",
2481  ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2482  E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2483  E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2484  E1000_CTRL_TFCE) ? "TX" : "None")));
2485 
2486  /* adjust timeout factor according to speed/duplex */
2487  adapter->tx_timeout_factor = 1;
2488  switch (adapter->link_speed) {
2489  case SPEED_10:
2490  txb2b = false;
2491  adapter->tx_timeout_factor = 16;
2492  break;
2493  case SPEED_100:
2494  txb2b = false;
2495  /* maybe add some timeout factor ? */
2496  break;
2497  }
2498 
2499  /* enable transmits in the hardware */
2500  tctl = er32(TCTL);
2501  tctl |= E1000_TCTL_EN;
2502  ew32(TCTL, tctl);
2503 
2504  netif_carrier_on(netdev);
2505  if (!test_bit(__E1000_DOWN, &adapter->flags))
2506  schedule_delayed_work(&adapter->phy_info_task,
2507  2 * HZ);
2508  adapter->smartspeed = 0;
2509  }
2510  } else {
2511  if (netif_carrier_ok(netdev)) {
2512  adapter->link_speed = 0;
2513  adapter->link_duplex = 0;
2514  pr_info("%s NIC Link is Down\n",
2515  netdev->name);
2516  netif_carrier_off(netdev);
2517 
2518  if (!test_bit(__E1000_DOWN, &adapter->flags))
2519  schedule_delayed_work(&adapter->phy_info_task,
2520  2 * HZ);
2521  }
2522 
2523  e1000_smartspeed(adapter);
2524  }
2525 
2526 link_up:
2527  e1000_update_stats(adapter);
2528 
2529  hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2530  adapter->tpt_old = adapter->stats.tpt;
2531  hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2532  adapter->colc_old = adapter->stats.colc;
2533 
2534  adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2535  adapter->gorcl_old = adapter->stats.gorcl;
2536  adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2537  adapter->gotcl_old = adapter->stats.gotcl;
2538 
2539  e1000_update_adaptive(hw);
2540 
2541  if (!netif_carrier_ok(netdev)) {
2542  if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2543  /* We've lost link, so the controller stops DMA,
2544  * but we've got queued Tx work that's never going
2545  * to get done, so reset controller to flush Tx.
2546  * (Do the reset outside of interrupt context). */
2547  adapter->tx_timeout_count++;
2548  schedule_work(&adapter->reset_task);
2549  /* exit immediately since reset is imminent */
2550  goto unlock;
2551  }
2552  }
2553 
2554  /* Simple mode for Interrupt Throttle Rate (ITR) */
2555  if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2556  /*
2557  * Symmetric Tx/Rx gets a reduced ITR=2000;
2558  * Total asymmetrical Tx or Rx gets ITR=8000;
2559  * everyone else is between 2000-8000.
2560  */
2561  u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2562  u32 dif = (adapter->gotcl > adapter->gorcl ?
2563  adapter->gotcl - adapter->gorcl :
2564  adapter->gorcl - adapter->gotcl) / 10000;
2565  u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2566 
2567  ew32(ITR, 1000000000 / (itr * 256));
2568  }
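 	/* Worked example for the simple ITR mode above, with hypothetical
 	 * per-interval counts gotcl = 30000 and gorcl = 10000: goc = 4 and
 	 * dif = 2, so itr = 2 * 6000 / 4 + 2000 = 5000 interrupts/sec,
 	 * which is programmed as 1000000000 / (5000 * 256) = 781.
 	 */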
2569 
2570  /* Cause software interrupt to ensure rx ring is cleaned */
2571  ew32(ICS, E1000_ICS_RXDMT0);
2572 
2573  /* Force detection of hung controller every watchdog period */
2574  adapter->detect_tx_hung = true;
2575 
2576  /* Reschedule the task */
2577  if (!test_bit(__E1000_DOWN, &adapter->flags))
2578  schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2579 
2580 unlock:
2581  mutex_unlock(&adapter->mutex);
2582 }
2583 
2584 enum latency_range {
2585  lowest_latency = 0,
2586  low_latency = 1,
2587  bulk_latency = 2,
2588  latency_invalid = 255
2589 };
2590 
2608 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2609  u16 itr_setting, int packets, int bytes)
2610 {
2611  unsigned int retval = itr_setting;
2612  struct e1000_hw *hw = &adapter->hw;
2613 
2614  if (unlikely(hw->mac_type < e1000_82540))
2615  goto update_itr_done;
2616 
2617  if (packets == 0)
2618  goto update_itr_done;
2619 
2620  switch (itr_setting) {
2621  case lowest_latency:
2622  /* jumbo frames get bulk treatment*/
2623  if (bytes/packets > 8000)
2624  retval = bulk_latency;
2625  else if ((packets < 5) && (bytes > 512))
2626  retval = low_latency;
2627  break;
2628  case low_latency: /* 50 usec aka 20000 ints/s */
2629  if (bytes > 10000) {
2630  /* jumbo frames need bulk latency setting */
2631  if (bytes/packets > 8000)
2632  retval = bulk_latency;
2633  else if ((packets < 10) || ((bytes/packets) > 1200))
2634  retval = bulk_latency;
2635  else if ((packets > 35))
2636  retval = lowest_latency;
2637  } else if (bytes/packets > 2000)
2638  retval = bulk_latency;
2639  else if (packets <= 2 && bytes < 512)
2640  retval = lowest_latency;
2641  break;
2642  case bulk_latency: /* 250 usec aka 4000 ints/s */
2643  if (bytes > 25000) {
2644  if (packets > 35)
2645  retval = low_latency;
2646  } else if (bytes < 6000) {
2647  retval = low_latency;
2648  }
2649  break;
2650  }
2651 
2652 update_itr_done:
2653  return retval;
2654 }
2655 
2656 static void e1000_set_itr(struct e1000_adapter *adapter)
2657 {
2658  struct e1000_hw *hw = &adapter->hw;
2659  u16 current_itr;
2660  u32 new_itr = adapter->itr;
2661 
2662  if (unlikely(hw->mac_type < e1000_82540))
2663  return;
2664 
2665  /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2666  if (unlikely(adapter->link_speed != SPEED_1000)) {
2667  current_itr = 0;
2668  new_itr = 4000;
2669  goto set_itr_now;
2670  }
2671 
2672  adapter->tx_itr = e1000_update_itr(adapter,
2673  adapter->tx_itr,
2674  adapter->total_tx_packets,
2675  adapter->total_tx_bytes);
2676  /* conservative mode (itr 3) eliminates the lowest_latency setting */
2677  if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2678  adapter->tx_itr = low_latency;
2679 
2680  adapter->rx_itr = e1000_update_itr(adapter,
2681  adapter->rx_itr,
2682  adapter->total_rx_packets,
2683  adapter->total_rx_bytes);
2684  /* conservative mode (itr 3) eliminates the lowest_latency setting */
2685  if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2686  adapter->rx_itr = low_latency;
2687 
2688  current_itr = max(adapter->rx_itr, adapter->tx_itr);
2689 
2690  switch (current_itr) {
2691  /* counts and packets in update_itr are dependent on these numbers */
2692  case lowest_latency:
2693  new_itr = 70000;
2694  break;
2695  case low_latency:
2696  new_itr = 20000; /* aka hwitr = ~200 */
2697  break;
2698  case bulk_latency:
2699  new_itr = 4000;
2700  break;
2701  default:
2702  break;
2703  }
2704 
2705 set_itr_now:
2706  if (new_itr != adapter->itr) {
2707  /* this attempts to bias the interrupt rate towards Bulk
2708  * by adding intermediate steps when interrupt rate is
2709  * increasing */
2710  new_itr = new_itr > adapter->itr ?
2711  min(adapter->itr + (new_itr >> 2), new_itr) :
2712  new_itr;
2713  adapter->itr = new_itr;
2714  ew32(ITR, 1000000000 / (new_itr * 256));
2715  }
2716 }
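/* For reference, the register write above converts an interrupts-per-second
 * target into the ITR interval value: lowest_latency's 70000 ints/s is
 * written as 1000000000 / (70000 * 256) = 55, and bulk_latency's
 * 4000 ints/s as 976.
 */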
2717 
2718 #define E1000_TX_FLAGS_CSUM 0x00000001
2719 #define E1000_TX_FLAGS_VLAN 0x00000002
2720 #define E1000_TX_FLAGS_TSO 0x00000004
2721 #define E1000_TX_FLAGS_IPV4 0x00000008
2722 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2723 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2724 #define E1000_TX_FLAGS_VLAN_SHIFT 16
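/* Example of the flag packing above: a hypothetical VLAN tag of 0x0123
 * returned by vlan_tx_tag_get() is stored as
 * 0x0123 << E1000_TX_FLAGS_VLAN_SHIFT = 0x01230000 in tx_flags, so the tag
 * occupies the VLAN_MASK bits while the low bits remain free for the
 * CSUM/VLAN/TSO/IPV4/NO_FCS flags defined here.
 */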
2725 
2726 static int e1000_tso(struct e1000_adapter *adapter,
2727  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2728 {
2729  struct e1000_context_desc *context_desc;
2730  struct e1000_buffer *buffer_info;
2731  unsigned int i;
2732  u32 cmd_length = 0;
2733  u16 ipcse = 0, tucse, mss;
2734  u8 ipcss, ipcso, tucss, tucso, hdr_len;
2735  int err;
2736 
2737  if (skb_is_gso(skb)) {
2738  if (skb_header_cloned(skb)) {
2739  err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2740  if (err)
2741  return err;
2742  }
2743 
2744  hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2745  mss = skb_shinfo(skb)->gso_size;
2746  if (skb->protocol == htons(ETH_P_IP)) {
2747  struct iphdr *iph = ip_hdr(skb);
2748  iph->tot_len = 0;
2749  iph->check = 0;
2750  tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2751  iph->daddr, 0,
2752  IPPROTO_TCP,
2753  0);
2754  cmd_length = E1000_TXD_CMD_IP;
2755  ipcse = skb_transport_offset(skb) - 1;
2756  } else if (skb->protocol == htons(ETH_P_IPV6)) {
2757  ipv6_hdr(skb)->payload_len = 0;
2758  tcp_hdr(skb)->check =
2759  ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2760  &ipv6_hdr(skb)->daddr,
2761  0, IPPROTO_TCP, 0);
2762  ipcse = 0;
2763  }
2764  ipcss = skb_network_offset(skb);
2765  ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2766  tucss = skb_transport_offset(skb);
2767  tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2768  tucse = 0;
2769 
2770  cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2771  E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2772 
2773  i = tx_ring->next_to_use;
2774  context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2775  buffer_info = &tx_ring->buffer_info[i];
2776 
2777  context_desc->lower_setup.ip_fields.ipcss = ipcss;
2778  context_desc->lower_setup.ip_fields.ipcso = ipcso;
2779  context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2780  context_desc->upper_setup.tcp_fields.tucss = tucss;
2781  context_desc->upper_setup.tcp_fields.tucso = tucso;
2782  context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2783  context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2784  context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2785  context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2786 
2787  buffer_info->time_stamp = jiffies;
2788  buffer_info->next_to_watch = i;
2789 
2790  if (++i == tx_ring->count) i = 0;
2791  tx_ring->next_to_use = i;
2792 
2793  return true;
2794  }
2795  return false;
2796 }
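/* For a common case of the TSO setup above (assuming a 14-byte Ethernet
 * header and IPv4/TCP with no options), hdr_len = skb_transport_offset (34)
 * + tcp_hdrlen (20) = 54, and the PAYLEN folded into cmd_length is
 * skb->len - 54.
 */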
2797 
2798 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2799  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2800 {
2801  struct e1000_context_desc *context_desc;
2802  struct e1000_buffer *buffer_info;
2803  unsigned int i;
2804  u8 css;
2805  u32 cmd_len = E1000_TXD_CMD_DEXT;
2806 
2807  if (skb->ip_summed != CHECKSUM_PARTIAL)
2808  return false;
2809 
2810  switch (skb->protocol) {
2811  case cpu_to_be16(ETH_P_IP):
2812  if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2813  cmd_len |= E1000_TXD_CMD_TCP;
2814  break;
2815  case cpu_to_be16(ETH_P_IPV6):
2816  /* XXX not handling all IPV6 headers */
2817  if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2818  cmd_len |= E1000_TXD_CMD_TCP;
2819  break;
2820  default:
2821  if (unlikely(net_ratelimit()))
2822  e_warn(drv, "checksum_partial proto=%x!\n",
2823  skb->protocol);
2824  break;
2825  }
2826 
2827  css = skb_checksum_start_offset(skb);
2828 
2829  i = tx_ring->next_to_use;
2830  buffer_info = &tx_ring->buffer_info[i];
2831  context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2832 
2833  context_desc->lower_setup.ip_config = 0;
2834  context_desc->upper_setup.tcp_fields.tucss = css;
2835  context_desc->upper_setup.tcp_fields.tucso =
2836  css + skb->csum_offset;
2837  context_desc->upper_setup.tcp_fields.tucse = 0;
2838  context_desc->tcp_seg_setup.data = 0;
2839  context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2840 
2841  buffer_info->time_stamp = jiffies;
2842  buffer_info->next_to_watch = i;
2843 
2844  if (unlikely(++i == tx_ring->count)) i = 0;
2845  tx_ring->next_to_use = i;
2846 
2847  return true;
2848 }
2849 
2850 #define E1000_MAX_TXD_PWR 12
2851 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
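/* E1000_MAX_DATA_PER_TXD works out to 1 << 12 = 4096 bytes, the upper bound
 * on how much payload e1000_tx_map() will place in a single transmit
 * descriptor before splitting the buffer.
 */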
2852 
2853 static int e1000_tx_map(struct e1000_adapter *adapter,
2854  struct e1000_tx_ring *tx_ring,
2855  struct sk_buff *skb, unsigned int first,
2856  unsigned int max_per_txd, unsigned int nr_frags,
2857  unsigned int mss)
2858 {
2859  struct e1000_hw *hw = &adapter->hw;
2860  struct pci_dev *pdev = adapter->pdev;
2861  struct e1000_buffer *buffer_info;
2862  unsigned int len = skb_headlen(skb);
2863  unsigned int offset = 0, size, count = 0, i;
2864  unsigned int f, bytecount, segs;
2865 
2866  i = tx_ring->next_to_use;
2867 
2868  while (len) {
2869  buffer_info = &tx_ring->buffer_info[i];
2870  size = min(len, max_per_txd);
2871  /* Workaround for Controller erratum --
2872  * descriptor for non-tso packet in a linear SKB that follows a
2873  * tso gets written back prematurely before the data is fully
2874  * DMA'd to the controller */
2875  if (!skb->data_len && tx_ring->last_tx_tso &&
2876  !skb_is_gso(skb)) {
2877  tx_ring->last_tx_tso = false;
2878  size -= 4;
2879  }
2880 
2881  /* Workaround for premature desc write-backs
2882  * in TSO mode. Append 4-byte sentinel desc */
2883  if (unlikely(mss && !nr_frags && size == len && size > 8))
2884  size -= 4;
2885  /* Work-around for errata 10; it applies
2886  * to all controllers in PCI-X mode.
2887  * The fix is to make sure that the first descriptor of a
2888  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2889  */
2890  if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2891  (size > 2015) && count == 0))
2892  size = 2015;
2893 
2894  /* Workaround for potential 82544 hang in PCI-X. Avoid
2895  * terminating buffers within evenly-aligned dwords. */
2896  if (unlikely(adapter->pcix_82544 &&
2897  !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2898  size > 4))
2899  size -= 4;
2900 
2901  buffer_info->length = size;
2902  /* set time_stamp *before* dma to help avoid a possible race */
2903  buffer_info->time_stamp = jiffies;
2904  buffer_info->mapped_as_page = false;
2905  buffer_info->dma = dma_map_single(&pdev->dev,
2906  skb->data + offset,
2907  size, DMA_TO_DEVICE);
2908  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2909  goto dma_error;
2910  buffer_info->next_to_watch = i;
2911 
2912  len -= size;
2913  offset += size;
2914  count++;
2915  if (len) {
2916  i++;
2917  if (unlikely(i == tx_ring->count))
2918  i = 0;
2919  }
2920  }
2921 
2922  for (f = 0; f < nr_frags; f++) {
2923  const struct skb_frag_struct *frag;
2924 
2925  frag = &skb_shinfo(skb)->frags[f];
2926  len = skb_frag_size(frag);
2927  offset = 0;
2928 
2929  while (len) {
2930  unsigned long bufend;
2931  i++;
2932  if (unlikely(i == tx_ring->count))
2933  i = 0;
2934 
2935  buffer_info = &tx_ring->buffer_info[i];
2936  size = min(len, max_per_txd);
2937  /* Workaround for premature desc write-backs
2938  * in TSO mode. Append 4-byte sentinel desc */
2939  if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2940  size -= 4;
2941  /* Workaround for potential 82544 hang in PCI-X.
2942  * Avoid terminating buffers within evenly-aligned
2943  * dwords. */
2944  bufend = (unsigned long)
2945  page_to_phys(skb_frag_page(frag));
2946  bufend += offset + size - 1;
2947  if (unlikely(adapter->pcix_82544 &&
2948  !(bufend & 4) &&
2949  size > 4))
2950  size -= 4;
2951 
2952  buffer_info->length = size;
2953  buffer_info->time_stamp = jiffies;
2954  buffer_info->mapped_as_page = true;
2955  buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2956  offset, size, DMA_TO_DEVICE);
2957  if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2958  goto dma_error;
2959  buffer_info->next_to_watch = i;
2960 
2961  len -= size;
2962  offset += size;
2963  count++;
2964  }
2965  }
2966 
2967  segs = skb_shinfo(skb)->gso_segs ?: 1;
2968  /* multiply data chunks by size of headers */
2969  bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2970 
2971  tx_ring->buffer_info[i].skb = skb;
2972  tx_ring->buffer_info[i].segs = segs;
2973  tx_ring->buffer_info[i].bytecount = bytecount;
2974  tx_ring->buffer_info[first].next_to_watch = i;
2975 
2976  return count;
2977 
2978 dma_error:
2979  dev_err(&pdev->dev, "TX DMA map failed\n");
2980  buffer_info->dma = 0;
2981  if (count)
2982  count--;
2983 
2984  while (count--) {
2985  if (i==0)
2986  i += tx_ring->count;
2987  i--;
2988  buffer_info = &tx_ring->buffer_info[i];
2989  e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2990  }
2991 
2992  return 0;
2993 }
2994 
2995 static void e1000_tx_queue(struct e1000_adapter *adapter,
2996  struct e1000_tx_ring *tx_ring, int tx_flags,
2997  int count)
2998 {
2999  struct e1000_hw *hw = &adapter->hw;
3000  struct e1000_tx_desc *tx_desc = NULL;
3001  struct e1000_buffer *buffer_info;
3002  u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3003  unsigned int i;
3004 
3005  if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3006  txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3007  E1000_TXD_CMD_TSE;
3008  txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3009 
3010  if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3011  txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3012  }
3013 
3014  if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3015  txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3016  txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3017  }
3018 
3019  if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3020  txd_lower |= E1000_TXD_CMD_VLE;
3021  txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3022  }
3023 
3024  if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3025  txd_lower &= ~(E1000_TXD_CMD_IFCS);
3026 
3027  i = tx_ring->next_to_use;
3028 
3029  while (count--) {
3030  buffer_info = &tx_ring->buffer_info[i];
3031  tx_desc = E1000_TX_DESC(*tx_ring, i);
3032  tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3033  tx_desc->lower.data =
3034  cpu_to_le32(txd_lower | buffer_info->length);
3035  tx_desc->upper.data = cpu_to_le32(txd_upper);
3036  if (unlikely(++i == tx_ring->count)) i = 0;
3037  }
3038 
3039  tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3040 
3041  /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3042  if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3043  tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3044 
3045  /* Force memory writes to complete before letting h/w
3046  * know there are new descriptors to fetch. (Only
3047  * applicable for weak-ordered memory model archs,
3048  * such as IA-64). */
3049  wmb();
3050 
3051  tx_ring->next_to_use = i;
3052  writel(i, hw->hw_addr + tx_ring->tdt);
3053  /* we need this if more than one processor can write to our tail
3054  * at a time, it synchronizes IO on IA64/Altix systems */
3055  mmiowb();
3056 }
3057 
3058 /* 82547 workaround to avoid controller hang in half-duplex environment.
3059  * The workaround is to avoid queuing a large packet that would span
3060  * the internal Tx FIFO ring boundary by notifying the stack to resend
3061  * the packet at a later time. This gives the Tx FIFO an opportunity to
3062  * flush all packets. When that occurs, we reset the Tx FIFO pointers
3063  * to the beginning of the Tx FIFO.
3064  */
3065 
3066 #define E1000_FIFO_HDR 0x10
3067 #define E1000_82547_PAD_LEN 0x3E0
3068 
3069 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3070  struct sk_buff *skb)
3071 {
3072  u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3073  u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3074 
3075  skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3076 
3077  if (adapter->link_duplex != HALF_DUPLEX)
3078  goto no_fifo_stall_required;
3079 
3080  if (atomic_read(&adapter->tx_fifo_stall))
3081  return 1;
3082 
3083  if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3084  atomic_set(&adapter->tx_fifo_stall, 1);
3085  return 1;
3086  }
3087 
3088 no_fifo_stall_required:
3089  adapter->tx_fifo_head += skb_fifo_len;
3090  if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3091  adapter->tx_fifo_head -= adapter->tx_fifo_size;
3092  return 0;
3093 }
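/* Worked example of the check above with hypothetical FIFO state:
 * tx_fifo_size = 0x2000 and tx_fifo_head = 0x1F00 leave fifo_space = 0x100;
 * a 1514-byte frame gives skb_fifo_len = ALIGN(1514 + 0x10, 0x10) = 0x600,
 * which is >= E1000_82547_PAD_LEN + 0x100 = 0x4E0, so the stall flag is set
 * and the transmit is retried after the FIFO drains.
 */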
3094 
3095 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3096 {
3097  struct e1000_adapter *adapter = netdev_priv(netdev);
3098  struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3099 
3100  netif_stop_queue(netdev);
3101  /* Herbert's original patch had:
3102  * smp_mb__after_netif_stop_queue();
3103  * but since that doesn't exist yet, just open code it. */
3104  smp_mb();
3105 
3106  /* We need to check again in a case another CPU has just
3107  * made room available. */
3108  if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3109  return -EBUSY;
3110 
3111  /* A reprieve! */
3112  netif_start_queue(netdev);
3113  ++adapter->restart_queue;
3114  return 0;
3115 }
3116 
3117 static int e1000_maybe_stop_tx(struct net_device *netdev,
3118  struct e1000_tx_ring *tx_ring, int size)
3119 {
3120  if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3121  return 0;
3122  return __e1000_maybe_stop_tx(netdev, size);
3123 }
3124 
3125 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
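/* TXD_USE_COUNT deliberately over-estimates by one descriptor, e.g.
 * TXD_USE_COUNT(4096, 12) = (4096 >> 12) + 1 = 2 and
 * TXD_USE_COUNT(100, 12) = 0 + 1 = 1, so the ring-space check in
 * e1000_xmit_frame() below stays conservative.
 */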
3126 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3127  struct net_device *netdev)
3128 {
3129  struct e1000_adapter *adapter = netdev_priv(netdev);
3130  struct e1000_hw *hw = &adapter->hw;
3131  struct e1000_tx_ring *tx_ring;
3132  unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3133  unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3134  unsigned int tx_flags = 0;
3135  unsigned int len = skb_headlen(skb);
3136  unsigned int nr_frags;
3137  unsigned int mss;
3138  int count = 0;
3139  int tso;
3140  unsigned int f;
3141 
3142  /* This goes back to the question of how to logically map a tx queue
3143  * to a flow. Right now, performance is impacted slightly negatively
3144  * if using multiple tx queues. If the stack breaks away from a
3145  * single qdisc implementation, we can look at this again. */
3146  tx_ring = adapter->tx_ring;
3147 
3148  if (unlikely(skb->len <= 0)) {
3149  dev_kfree_skb_any(skb);
3150  return NETDEV_TX_OK;
3151  }
3152 
3153  /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3154  * packets may get corrupted during padding by HW.
3155  * To work around this issue, pad all small packets manually.
3156  */
3157  if (skb->len < ETH_ZLEN) {
3158  if (skb_pad(skb, ETH_ZLEN - skb->len))
3159  return NETDEV_TX_OK;
3160  skb->len = ETH_ZLEN;
3161  skb_set_tail_pointer(skb, ETH_ZLEN);
3162  }
3163 
3164  mss = skb_shinfo(skb)->gso_size;
3165  /* The controller does a simple calculation to
3166  * make sure there is enough room in the FIFO before
3167  * initiating the DMA for each buffer. The calc is:
3168  * 4 = ceil(buffer len/mss). To make sure we don't
3169  * overrun the FIFO, adjust the max buffer len if mss
3170  * drops. */
3171  if (mss) {
3172  u8 hdr_len;
3173  max_per_txd = min(mss << 2, max_per_txd);
3174  max_txd_pwr = fls(max_per_txd) - 1;
3175 
3176  hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3177  if (skb->data_len && hdr_len == len) {
3178  switch (hw->mac_type) {
3179  unsigned int pull_size;
3180  case e1000_82544:
3181  /* Make sure we have room to chop off 4 bytes,
3182  * and that the end alignment will work out to
3183  * this hardware's requirements
3184  * NOTE: this is a TSO only workaround
3185  * if end byte alignment not correct move us
3186  * into the next dword */
3187  if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3188  break;
3189  /* fall through */
3190  pull_size = min((unsigned int)4, skb->data_len);
3191  if (!__pskb_pull_tail(skb, pull_size)) {
3192  e_err(drv, "__pskb_pull_tail "
3193  "failed.\n");
3194  dev_kfree_skb_any(skb);
3195  return NETDEV_TX_OK;
3196  }
3197  len = skb_headlen(skb);
3198  break;
3199  default:
3200  /* do nothing */
3201  break;
3202  }
3203  }
3204  }
3205 
3206  /* reserve a descriptor for the offload context */
3207  if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3208  count++;
3209  count++;
3210 
3211  /* Controller Erratum workaround */
3212  if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3213  count++;
3214 
3215  count += TXD_USE_COUNT(len, max_txd_pwr);
3216 
3217  if (adapter->pcix_82544)
3218  count++;
3219 
3220  /* Work-around for errata 10; it applies to all controllers
3221  * in PCI-X mode, so add one more descriptor to the count.
3222  */
3223  if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3224  (len > 2015)))
3225  count++;
3226 
3227  nr_frags = skb_shinfo(skb)->nr_frags;
3228  for (f = 0; f < nr_frags; f++)
3229  count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3230  max_txd_pwr);
3231  if (adapter->pcix_82544)
3232  count += nr_frags;
3233 
3234  /* need: count + 2 desc gap to keep tail from touching
3235  * head, otherwise try next time */
3236  if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3237  return NETDEV_TX_BUSY;
3238 
3239  if (unlikely((hw->mac_type == e1000_82547) &&
3240  (e1000_82547_fifo_workaround(adapter, skb)))) {
3241  netif_stop_queue(netdev);
3242  if (!test_bit(__E1000_DOWN, &adapter->flags))
3243  schedule_delayed_work(&adapter->fifo_stall_task, 1);
3244  return NETDEV_TX_BUSY;
3245  }
3246 
3247  if (vlan_tx_tag_present(skb)) {
3248  tx_flags |= E1000_TX_FLAGS_VLAN;
3249  tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3250  }
3251 
3252  first = tx_ring->next_to_use;
3253 
3254  tso = e1000_tso(adapter, tx_ring, skb);
3255  if (tso < 0) {
3256  dev_kfree_skb_any(skb);
3257  return NETDEV_TX_OK;
3258  }
3259 
3260  if (likely(tso)) {
3261  if (likely(hw->mac_type != e1000_82544))
3262  tx_ring->last_tx_tso = true;
3263  tx_flags |= E1000_TX_FLAGS_TSO;
3264  } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3265  tx_flags |= E1000_TX_FLAGS_CSUM;
3266 
3267  if (likely(skb->protocol == htons(ETH_P_IP)))
3268  tx_flags |= E1000_TX_FLAGS_IPV4;
3269 
3270  if (unlikely(skb->no_fcs))
3271  tx_flags |= E1000_TX_FLAGS_NO_FCS;
3272 
3273  count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3274  nr_frags, mss);
3275 
3276  if (count) {
3277  netdev_sent_queue(netdev, skb->len);
3278  skb_tx_timestamp(skb);
3279 
3280  e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3281  /* Make sure there is space in the ring for the next send. */
3282  e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3283 
3284  } else {
3285  dev_kfree_skb_any(skb);
3286  tx_ring->buffer_info[first].time_stamp = 0;
3287  tx_ring->next_to_use = first;
3288  }
3289 
3290  return NETDEV_TX_OK;
3291 }
3292 
3293 #define NUM_REGS 38 /* 1 based count */
3294 static void e1000_regdump(struct e1000_adapter *adapter)
3295 {
3296  struct e1000_hw *hw = &adapter->hw;
3297  u32 regs[NUM_REGS];
3298  u32 *regs_buff = regs;
3299  int i = 0;
3300 
3301  static const char * const reg_name[] = {
3302  "CTRL", "STATUS",
3303  "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3304  "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3305  "TIDV", "TXDCTL", "TADV", "TARC0",
3306  "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3307  "TXDCTL1", "TARC1",
3308  "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3309  "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3310  "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3311  };
3312 
3313  regs_buff[0] = er32(CTRL);
3314  regs_buff[1] = er32(STATUS);
3315 
3316  regs_buff[2] = er32(RCTL);
3317  regs_buff[3] = er32(RDLEN);
3318  regs_buff[4] = er32(RDH);
3319  regs_buff[5] = er32(RDT);
3320  regs_buff[6] = er32(RDTR);
3321 
3322  regs_buff[7] = er32(TCTL);
3323  regs_buff[8] = er32(TDBAL);
3324  regs_buff[9] = er32(TDBAH);
3325  regs_buff[10] = er32(TDLEN);
3326  regs_buff[11] = er32(TDH);
3327  regs_buff[12] = er32(TDT);
3328  regs_buff[13] = er32(TIDV);
3329  regs_buff[14] = er32(TXDCTL);
3330  regs_buff[15] = er32(TADV);
3331  regs_buff[16] = er32(TARC0);
3332 
3333  regs_buff[17] = er32(TDBAL1);
3334  regs_buff[18] = er32(TDBAH1);
3335  regs_buff[19] = er32(TDLEN1);
3336  regs_buff[20] = er32(TDH1);
3337  regs_buff[21] = er32(TDT1);
3338  regs_buff[22] = er32(TXDCTL1);
3339  regs_buff[23] = er32(TARC1);
3340  regs_buff[24] = er32(CTRL_EXT);
3341  regs_buff[25] = er32(ERT);
3342  regs_buff[26] = er32(RDBAL0);
3343  regs_buff[27] = er32(RDBAH0);
3344  regs_buff[28] = er32(TDFH);
3345  regs_buff[29] = er32(TDFT);
3346  regs_buff[30] = er32(TDFHS);
3347  regs_buff[31] = er32(TDFTS);
3348  regs_buff[32] = er32(TDFPC);
3349  regs_buff[33] = er32(RDFH);
3350  regs_buff[34] = er32(RDFT);
3351  regs_buff[35] = er32(RDFHS);
3352  regs_buff[36] = er32(RDFTS);
3353  regs_buff[37] = er32(RDFPC);
3354 
3355  pr_info("Register dump\n");
3356  for (i = 0; i < NUM_REGS; i++)
3357  pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3358 }
3359 
3360 /*
3361  * e1000_dump: Print registers, tx ring and rx ring
3362  */
3363 static void e1000_dump(struct e1000_adapter *adapter)
3364 {
3365  /* this code doesn't handle multiple rings */
3366  struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3367  struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3368  int i;
3369 
3370  if (!netif_msg_hw(adapter))
3371  return;
3372 
3373  /* Print Registers */
3374  e1000_regdump(adapter);
3375 
3376  /*
3377  * transmit dump
3378  */
3379  pr_info("TX Desc ring0 dump\n");
3380 
3381  /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3382  *
3383  * Legacy Transmit Descriptor
3384  * +--------------------------------------------------------------+
3385  * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3386  * +--------------------------------------------------------------+
3387  * 8 | Special | CSS | Status | CMD | CSO | Length |
3388  * +--------------------------------------------------------------+
3389  * 63 48 47 36 35 32 31 24 23 16 15 0
3390  *
3391  * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3392  * 63 48 47 40 39 32 31 16 15 8 7 0
3393  * +----------------------------------------------------------------+
3394  * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3395  * +----------------------------------------------------------------+
3396  * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3397  * +----------------------------------------------------------------+
3398  * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3399  *
3400  * Extended Data Descriptor (DTYP=0x1)
3401  * +----------------------------------------------------------------+
3402  * 0 | Buffer Address [63:0] |
3403  * +----------------------------------------------------------------+
3404  * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3405  * +----------------------------------------------------------------+
3406  * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3407  */
3408  pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3409  pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3410 
3411  if (!netif_msg_tx_done(adapter))
3412  goto rx_ring_summary;
3413 
3414  for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3415  struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3416  struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3417  struct my_u { __le64 a; __le64 b; };
3418  struct my_u *u = (struct my_u *)tx_desc;
3419  const char *type;
3420 
3421  if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3422  type = "NTC/U";
3423  else if (i == tx_ring->next_to_use)
3424  type = "NTU";
3425  else if (i == tx_ring->next_to_clean)
3426  type = "NTC";
3427  else
3428  type = "";
3429 
3430  pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3431  ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3432  le64_to_cpu(u->a), le64_to_cpu(u->b),
3433  (u64)buffer_info->dma, buffer_info->length,
3434  buffer_info->next_to_watch,
3435  (u64)buffer_info->time_stamp, buffer_info->skb, type);
3436  }
3437 
3438 rx_ring_summary:
3439  /*
3440  * receive dump
3441  */
3442  pr_info("\nRX Desc ring dump\n");
3443 
3444  /* Legacy Receive Descriptor Format
3445  *
3446  * +-----------------------------------------------------+
3447  * | Buffer Address [63:0] |
3448  * +-----------------------------------------------------+
3449  * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3450  * +-----------------------------------------------------+
3451  * 63 48 47 40 39 32 31 16 15 0
3452  */
3453  pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3454 
3455  if (!netif_msg_rx_status(adapter))
3456  goto exit;
3457 
3458  for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3459  struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3460  struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3461  struct my_u { __le64 a; __le64 b; };
3462  struct my_u *u = (struct my_u *)rx_desc;
3463  const char *type;
3464 
3465  if (i == rx_ring->next_to_use)
3466  type = "NTU";
3467  else if (i == rx_ring->next_to_clean)
3468  type = "NTC";
3469  else
3470  type = "";
3471 
3472  pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3473  i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3474  (u64)buffer_info->dma, buffer_info->skb, type);
3475  } /* for */
3476 
3477  /* dump the descriptor caches */
3478  /* rx */
3479  pr_info("Rx descriptor cache in 64bit format\n");
3480  for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3481  pr_info("R%04X: %08X|%08X %08X|%08X\n",
3482  i,
3483  readl(adapter->hw.hw_addr + i+4),
3484  readl(adapter->hw.hw_addr + i),
3485  readl(adapter->hw.hw_addr + i+12),
3486  readl(adapter->hw.hw_addr + i+8));
3487  }
3488  /* tx */
3489  pr_info("Tx descriptor cache in 64bit format\n");
3490  for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3491  pr_info("T%04X: %08X|%08X %08X|%08X\n",
3492  i,
3493  readl(adapter->hw.hw_addr + i+4),
3494  readl(adapter->hw.hw_addr + i),
3495  readl(adapter->hw.hw_addr + i+12),
3496  readl(adapter->hw.hw_addr + i+8));
3497  }
3498 exit:
3499  return;
3500 }
3501 
3507 static void e1000_tx_timeout(struct net_device *netdev)
3508 {
3509  struct e1000_adapter *adapter = netdev_priv(netdev);
3510 
3511  /* Do the reset outside of interrupt context */
3512  adapter->tx_timeout_count++;
3513  schedule_work(&adapter->reset_task);
3514 }
3515 
3516 static void e1000_reset_task(struct work_struct *work)
3517 {
3518  struct e1000_adapter *adapter =
3519  container_of(work, struct e1000_adapter, reset_task);
3520 
3521  if (test_bit(__E1000_DOWN, &adapter->flags))
3522  return;
3523  e_err(drv, "Reset adapter\n");
3524  e1000_reinit_safe(adapter);
3525 }
3526 
3535 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3536 {
3537  /* only return the current stats */
3538  return &netdev->stats;
3539 }
3540 
3549 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3550 {
3551  struct e1000_adapter *adapter = netdev_priv(netdev);
3552  struct e1000_hw *hw = &adapter->hw;
3553  int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3554 
3555  if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3556  (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3557  e_err(probe, "Invalid MTU setting\n");
3558  return -EINVAL;
3559  }
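 	/* For reference: with the standard 1500-byte MTU, max_frame is
 	 * 1500 + ENET_HEADER_SIZE (14) + ETHERNET_FCS_SIZE (4) = 1518 bytes,
 	 * comfortably inside the range checked above.
 	 */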
3560 
3561  /* Adapter-specific max frame size limits. */
3562  switch (hw->mac_type) {
3563  case e1000_undefined ... e1000_82542_rev2_1:
3564  if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3565  e_err(probe, "Jumbo Frames not supported.\n");
3566  return -EINVAL;
3567  }
3568  break;
3569  default:
3570  /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3571  break;
3572  }
3573 
3574  while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3575  msleep(1);
3576  /* e1000_down has a dependency on max_frame_size */
3577  hw->max_frame_size = max_frame;
3578  if (netif_running(netdev))
3579  e1000_down(adapter);
3580 
3581  /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3582  * means we reserve 2 more, this pushes us to allocate from the next
3583  * larger slab size.
3584  * i.e. RXBUFFER_2048 --> size-4096 slab
3585  * however with the new *_jumbo_rx* routines, jumbo receives will use
3586  * fragmented skbs */
3587 
3588  if (max_frame <= E1000_RXBUFFER_2048)
3589  adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3590  else
3591 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3592  adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3593 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3594  adapter->rx_buffer_len = PAGE_SIZE;
3595 #endif
3596 
3597  /* adjust allocation if LPE protects us, and we aren't using SBP */
3598  if (!hw->tbi_compatibility_on &&
3599  ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3600  (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3601  adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3602 
3603  pr_info("%s changing MTU from %d to %d\n",
3604  netdev->name, netdev->mtu, new_mtu);
3605  netdev->mtu = new_mtu;
3606 
3607  if (netif_running(netdev))
3608  e1000_up(adapter);
3609  else
3610  e1000_reset(adapter);
3611 
3612  clear_bit(__E1000_RESETTING, &adapter->flags);
3613 
3614  return 0;
3615 }
3616 
3622 void e1000_update_stats(struct e1000_adapter *adapter)
3623 {
3624  struct net_device *netdev = adapter->netdev;
3625  struct e1000_hw *hw = &adapter->hw;
3626  struct pci_dev *pdev = adapter->pdev;
3627  unsigned long flags;
3628  u16 phy_tmp;
3629 
3630 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3631 
3632  /*
3633  * Prevent stats update while adapter is being reset, or if the pci
3634  * connection is down.
3635  */
3636  if (adapter->link_speed == 0)
3637  return;
3638  if (pci_channel_offline(pdev))
3639  return;
3640 
3641  spin_lock_irqsave(&adapter->stats_lock, flags);
3642 
3643  /* these counters are modified from e1000_tbi_adjust_stats,
3644  * called from the interrupt context, so they must only
3645  * be written while holding adapter->stats_lock
3646  */
3647 
3648  adapter->stats.crcerrs += er32(CRCERRS);
3649  adapter->stats.gprc += er32(GPRC);
3650  adapter->stats.gorcl += er32(GORCL);
3651  adapter->stats.gorch += er32(GORCH);
3652  adapter->stats.bprc += er32(BPRC);
3653  adapter->stats.mprc += er32(MPRC);
3654  adapter->stats.roc += er32(ROC);
3655 
3656  adapter->stats.prc64 += er32(PRC64);
3657  adapter->stats.prc127 += er32(PRC127);
3658  adapter->stats.prc255 += er32(PRC255);
3659  adapter->stats.prc511 += er32(PRC511);
3660  adapter->stats.prc1023 += er32(PRC1023);
3661  adapter->stats.prc1522 += er32(PRC1522);
3662 
3663  adapter->stats.symerrs += er32(SYMERRS);
3664  adapter->stats.mpc += er32(MPC);
3665  adapter->stats.scc += er32(SCC);
3666  adapter->stats.ecol += er32(ECOL);
3667  adapter->stats.mcc += er32(MCC);
3668  adapter->stats.latecol += er32(LATECOL);
3669  adapter->stats.dc += er32(DC);
3670  adapter->stats.sec += er32(SEC);
3671  adapter->stats.rlec += er32(RLEC);
3672  adapter->stats.xonrxc += er32(XONRXC);
3673  adapter->stats.xontxc += er32(XONTXC);
3674  adapter->stats.xoffrxc += er32(XOFFRXC);
3675  adapter->stats.xofftxc += er32(XOFFTXC);
3676  adapter->stats.fcruc += er32(FCRUC);
3677  adapter->stats.gptc += er32(GPTC);
3678  adapter->stats.gotcl += er32(GOTCL);
3679  adapter->stats.gotch += er32(GOTCH);
3680  adapter->stats.rnbc += er32(RNBC);
3681  adapter->stats.ruc += er32(RUC);
3682  adapter->stats.rfc += er32(RFC);
3683  adapter->stats.rjc += er32(RJC);
3684  adapter->stats.torl += er32(TORL);
3685  adapter->stats.torh += er32(TORH);
3686  adapter->stats.totl += er32(TOTL);
3687  adapter->stats.toth += er32(TOTH);
3688  adapter->stats.tpr += er32(TPR);
3689 
3690  adapter->stats.ptc64 += er32(PTC64);
3691  adapter->stats.ptc127 += er32(PTC127);
3692  adapter->stats.ptc255 += er32(PTC255);
3693  adapter->stats.ptc511 += er32(PTC511);
3694  adapter->stats.ptc1023 += er32(PTC1023);
3695  adapter->stats.ptc1522 += er32(PTC1522);
3696 
3697  adapter->stats.mptc += er32(MPTC);
3698  adapter->stats.bptc += er32(BPTC);
3699 
3700  /* used for adaptive IFS */
3701 
3702  hw->tx_packet_delta = er32(TPT);
3703  adapter->stats.tpt += hw->tx_packet_delta;
3704  hw->collision_delta = er32(COLC);
3705  adapter->stats.colc += hw->collision_delta;
3706 
3707  if (hw->mac_type >= e1000_82543) {
3708  adapter->stats.algnerrc += er32(ALGNERRC);
3709  adapter->stats.rxerrc += er32(RXERRC);
3710  adapter->stats.tncrs += er32(TNCRS);
3711  adapter->stats.cexterr += er32(CEXTERR);
3712  adapter->stats.tsctc += er32(TSCTC);
3713  adapter->stats.tsctfc += er32(TSCTFC);
3714  }
3715 
3716  /* Fill out the OS statistics structure */
3717  netdev->stats.multicast = adapter->stats.mprc;
3718  netdev->stats.collisions = adapter->stats.colc;
3719 
3720  /* Rx Errors */
3721 
3722  /* RLEC on some newer hardware can be incorrect so build
3723  * our own version based on RUC and ROC */
3724  netdev->stats.rx_errors = adapter->stats.rxerrc +
3725  adapter->stats.crcerrs + adapter->stats.algnerrc +
3726  adapter->stats.ruc + adapter->stats.roc +
3727  adapter->stats.cexterr;
3728  adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3729  netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3730  netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3731  netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3732  netdev->stats.rx_missed_errors = adapter->stats.mpc;
3733 
3734  /* Tx Errors */
3735  adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3736  netdev->stats.tx_errors = adapter->stats.txerrc;
3737  netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3738  netdev->stats.tx_window_errors = adapter->stats.latecol;
3739  netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3740  if (hw->bad_tx_carr_stats_fd &&
3741  adapter->link_duplex == FULL_DUPLEX) {
3742  netdev->stats.tx_carrier_errors = 0;
3743  adapter->stats.tncrs = 0;
3744  }
3745 
3746  /* Tx Dropped needs to be maintained elsewhere */
3747 
3748  /* Phy Stats */
3749  if (hw->media_type == e1000_media_type_copper) {
3750  if ((adapter->link_speed == SPEED_1000) &&
3751  (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3752  phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3753  adapter->phy_stats.idle_errors += phy_tmp;
3754  }
3755 
3756  if ((hw->mac_type <= e1000_82546) &&
3757  (hw->phy_type == e1000_phy_m88) &&
3758  !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3759  adapter->phy_stats.receive_errors += phy_tmp;
3760  }
3761 
3762  /* Management Stats */
3763  if (hw->has_smbus) {
3764  adapter->stats.mgptc += er32(MGTPTC);
3765  adapter->stats.mgprc += er32(MGTPRC);
3766  adapter->stats.mgpdc += er32(MGTPDC);
3767  }
3768 
3769  spin_unlock_irqrestore(&adapter->stats_lock, flags);
3770 }
3771 
3778 static irqreturn_t e1000_intr(int irq, void *data)
3779 {
3780  struct net_device *netdev = data;
3781  struct e1000_adapter *adapter = netdev_priv(netdev);
3782  struct e1000_hw *hw = &adapter->hw;
3783  u32 icr = er32(ICR);
3784 
3785  if (unlikely((!icr)))
3786  return IRQ_NONE; /* Not our interrupt */
3787 
3788  /*
3789  * we might have caused the interrupt, but the above
3790  * read cleared it, and just in case the driver is
3791  * down there is nothing to do so return handled
3792  */
3793  if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3794  return IRQ_HANDLED;
3795 
3796  if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3797  hw->get_link_status = 1;
3798  /* guard against interrupt when we're going down */
3799  if (!test_bit(__E1000_DOWN, &adapter->flags))
3800  schedule_delayed_work(&adapter->watchdog_task, 1);
3801  }
3802 
3803  /* disable interrupts, without the synchronize_irq bit */
3804  ew32(IMC, ~0);
3805  E1000_WRITE_FLUSH();
3806 
3807  if (likely(napi_schedule_prep(&adapter->napi))) {
3808  adapter->total_tx_bytes = 0;
3809  adapter->total_tx_packets = 0;
3810  adapter->total_rx_bytes = 0;
3811  adapter->total_rx_packets = 0;
3812  __napi_schedule(&adapter->napi);
3813  } else {
3814  /* this really should not happen! if it does it is basically a
3815  * bug, but not a hard error, so enable ints and continue */
3816  if (!test_bit(__E1000_DOWN, &adapter->flags))
3817  e1000_irq_enable(adapter);
3818  }
3819 
3820  return IRQ_HANDLED;
3821 }
3822 
3827 static int e1000_clean(struct napi_struct *napi, int budget)
3828 {
3829  struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3830  int tx_clean_complete = 0, work_done = 0;
3831 
3832  tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3833 
3834  adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3835 
3836  if (!tx_clean_complete)
3837  work_done = budget;
3838 
3839  /* If budget not fully consumed, exit the polling mode */
3840  if (work_done < budget) {
3841  if (likely(adapter->itr_setting & 3))
3842  e1000_set_itr(adapter);
3843  napi_complete(napi);
3844  if (!test_bit(__E1000_DOWN, &adapter->flags))
3845  e1000_irq_enable(adapter);
3846  }
3847 
3848  return work_done;
3849 }
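/* Note on the NAPI accounting above: when the Tx clean did not finish,
 * work_done is forced to the full budget so the core keeps polling; only
 * when work_done < budget does the driver complete NAPI and re-enable
 * interrupts.
 */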
3850 
3855 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3856  struct e1000_tx_ring *tx_ring)
3857 {
3858  struct e1000_hw *hw = &adapter->hw;
3859  struct net_device *netdev = adapter->netdev;
3860  struct e1000_tx_desc *tx_desc, *eop_desc;
3861  struct e1000_buffer *buffer_info;
3862  unsigned int i, eop;
3863  unsigned int count = 0;
3864  unsigned int total_tx_bytes=0, total_tx_packets=0;
3865  unsigned int bytes_compl = 0, pkts_compl = 0;
3866 
3867  i = tx_ring->next_to_clean;
3868  eop = tx_ring->buffer_info[i].next_to_watch;
3869  eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870 
3871  while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3872  (count < tx_ring->count)) {
3873  bool cleaned = false;
3874  rmb(); /* read buffer_info after eop_desc */
3875  for ( ; !cleaned; count++) {
3876  tx_desc = E1000_TX_DESC(*tx_ring, i);
3877  buffer_info = &tx_ring->buffer_info[i];
3878  cleaned = (i == eop);
3879 
3880  if (cleaned) {
3881  total_tx_packets += buffer_info->segs;
3882  total_tx_bytes += buffer_info->bytecount;
3883  if (buffer_info->skb) {
3884  bytes_compl += buffer_info->skb->len;
3885  pkts_compl++;
3886  }
3887 
3888  }
3889  e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3890  tx_desc->upper.data = 0;
3891 
3892  if (unlikely(++i == tx_ring->count)) i = 0;
3893  }
3894 
3895  eop = tx_ring->buffer_info[i].next_to_watch;
3896  eop_desc = E1000_TX_DESC(*tx_ring, eop);
3897  }
3898 
3899  tx_ring->next_to_clean = i;
3900 
3901  netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3902 
3903 #define TX_WAKE_THRESHOLD 32
3904  if (unlikely(count && netif_carrier_ok(netdev) &&
3905  E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3906  /* Make sure that anybody stopping the queue after this
3907  * sees the new next_to_clean.
3908  */
3909  smp_mb();
3910 
3911  if (netif_queue_stopped(netdev) &&
3912  !(test_bit(__E1000_DOWN, &adapter->flags))) {
3913  netif_wake_queue(netdev);
3914  ++adapter->restart_queue;
3915  }
3916  }
3917 
3918  if (adapter->detect_tx_hung) {
3919  /* Detect a transmit hang in hardware, this serializes the
3920  * check with the clearing of time_stamp and movement of i */
3921  adapter->detect_tx_hung = false;
3922  if (tx_ring->buffer_info[eop].time_stamp &&
3923  time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3924  (adapter->tx_timeout_factor * HZ)) &&
3925  !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3926 
3927  /* detected Tx unit hang */
3928  e_err(drv, "Detected Tx Unit Hang\n"
3929  " Tx Queue <%lu>\n"
3930  " TDH <%x>\n"
3931  " TDT <%x>\n"
3932  " next_to_use <%x>\n"
3933  " next_to_clean <%x>\n"
3934  "buffer_info[next_to_clean]\n"
3935  " time_stamp <%lx>\n"
3936  " next_to_watch <%x>\n"
3937  " jiffies <%lx>\n"
3938  " next_to_watch.status <%x>\n",
3939  (unsigned long)((tx_ring - adapter->tx_ring) /
3940  sizeof(struct e1000_tx_ring)),
3941  readl(hw->hw_addr + tx_ring->tdh),
3942  readl(hw->hw_addr + tx_ring->tdt),
3943  tx_ring->next_to_use,
3944  tx_ring->next_to_clean,
3945  tx_ring->buffer_info[eop].time_stamp,
3946  eop,
3947  jiffies,
3948  eop_desc->upper.fields.status);
3949  e1000_dump(adapter);
3950  netif_stop_queue(netdev);
3951  }
3952  }
3953  adapter->total_tx_bytes += total_tx_bytes;
3954  adapter->total_tx_packets += total_tx_packets;
3955  netdev->stats.tx_bytes += total_tx_bytes;
3956  netdev->stats.tx_packets += total_tx_packets;
3957  return count < tx_ring->count;
3958 }
3959 
3968 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3969  u32 csum, struct sk_buff *skb)
3970 {
3971  struct e1000_hw *hw = &adapter->hw;
3972  u16 status = (u16)status_err;
3973  u8 errors = (u8)(status_err >> 24);
3974 
3975  skb_checksum_none_assert(skb);
3976 
3977  /* 82543 or newer only */
3978  if (unlikely(hw->mac_type < e1000_82543)) return;
3979  /* Ignore Checksum bit is set */
3980  if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3981  /* TCP/UDP checksum error bit is set */
3982  if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3983  /* let the stack verify checksum errors */
3984  adapter->hw_csum_err++;
3985  return;
3986  }
3987  /* TCP/UDP Checksum has not been calculated */
3988  if (!(status & E1000_RXD_STAT_TCPCS))
3989  return;
3990 
3991  /* It must be a TCP or UDP packet with a valid checksum */
3992  if (likely(status & E1000_RXD_STAT_TCPCS)) {
3993  /* TCP checksum is good */
3994  skb->ip_summed = CHECKSUM_UNNECESSARY;
3995  }
3996  adapter->hw_csum_good++;
3997 }
3998 
4002 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
4003  u16 length)
4004 {
4005  bi->page = NULL;
4006  skb->len += length;
4007  skb->data_len += length;
4008  skb->truesize += PAGE_SIZE;
4009 }
4010 
4018 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4019  __le16 vlan, struct sk_buff *skb)
4020 {
4021  skb->protocol = eth_type_trans(skb, adapter->netdev);
4022 
4023  if (status & E1000_RXD_STAT_VP) {
4024  u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4025 
4026  __vlan_hwaccel_put_tag(skb, vid);
4027  }
4028  napi_gro_receive(&adapter->napi, skb);
4029 }
4030 
4041 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4042  struct e1000_rx_ring *rx_ring,
4043  int *work_done, int work_to_do)
4044 {
4045  struct e1000_hw *hw = &adapter->hw;
4046  struct net_device *netdev = adapter->netdev;
4047  struct pci_dev *pdev = adapter->pdev;
4048  struct e1000_rx_desc *rx_desc, *next_rxd;
4049  struct e1000_buffer *buffer_info, *next_buffer;
4050  unsigned long irq_flags;
4051  u32 length;
4052  unsigned int i;
4053  int cleaned_count = 0;
4054  bool cleaned = false;
4055  unsigned int total_rx_bytes=0, total_rx_packets=0;
4056 
4057  i = rx_ring->next_to_clean;
4058  rx_desc = E1000_RX_DESC(*rx_ring, i);
4059  buffer_info = &rx_ring->buffer_info[i];
4060 
4061  while (rx_desc->status & E1000_RXD_STAT_DD) {
4062  struct sk_buff *skb;
4063  u8 status;
4064 
4065  if (*work_done >= work_to_do)
4066  break;
4067  (*work_done)++;
4068  rmb(); /* read descriptor and rx_buffer_info after status DD */
4069 
4070  status = rx_desc->status;
4071  skb = buffer_info->skb;
4072  buffer_info->skb = NULL;
4073 
4074  if (++i == rx_ring->count) i = 0;
4075  next_rxd = E1000_RX_DESC(*rx_ring, i);
4076  prefetch(next_rxd);
4077 
4078  next_buffer = &rx_ring->buffer_info[i];
4079 
4080  cleaned = true;
4081  cleaned_count++;
4082  dma_unmap_page(&pdev->dev, buffer_info->dma,
4083  buffer_info->length, DMA_FROM_DEVICE);
4084  buffer_info->dma = 0;
4085 
4086  length = le16_to_cpu(rx_desc->length);
4087 
4088  /* errors is only valid for DD + EOP descriptors */
4089  if (unlikely((status & E1000_RXD_STAT_EOP) &&
4090  (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4091  u8 *mapped;
4092  u8 last_byte;
4093 
4094  mapped = page_address(buffer_info->page);
4095  last_byte = *(mapped + length - 1);
4096  if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4097  last_byte)) {
4098  spin_lock_irqsave(&adapter->stats_lock,
4099  irq_flags);
4100  e1000_tbi_adjust_stats(hw, &adapter->stats,
4101  length, mapped);
4102  spin_unlock_irqrestore(&adapter->stats_lock,
4103  irq_flags);
4104  length--;
4105  } else {
4106  if (netdev->features & NETIF_F_RXALL)
4107  goto process_skb;
4108  /* recycle both page and skb */
4109  buffer_info->skb = skb;
4110  /* an error means any chain goes out the window
4111  * too */
4112  if (rx_ring->rx_skb_top)
4113  dev_kfree_skb(rx_ring->rx_skb_top);
4114  rx_ring->rx_skb_top = NULL;
4115  goto next_desc;
4116  }
4117  }
4118 
4119 #define rxtop rx_ring->rx_skb_top
4120 process_skb:
4121  if (!(status & E1000_RXD_STAT_EOP)) {
4122  /* this descriptor is only the beginning (or middle) */
4123  if (!rxtop) {
4124  /* this is the beginning of a chain */
4125  rxtop = skb;
4126  skb_fill_page_desc(rxtop, 0, buffer_info->page,
4127  0, length);
4128  } else {
4129  /* this is the middle of a chain */
4130  skb_fill_page_desc(rxtop,
4131  skb_shinfo(rxtop)->nr_frags,
4132  buffer_info->page, 0, length);
4133  /* re-use the skb, only consumed the page */
4134  buffer_info->skb = skb;
4135  }
4136  e1000_consume_page(buffer_info, rxtop, length);
4137  goto next_desc;
4138  } else {
4139  if (rxtop) {
4140  /* end of the chain */
4141  skb_fill_page_desc(rxtop,
4142  skb_shinfo(rxtop)->nr_frags,
4143  buffer_info->page, 0, length);
4144  /* re-use the current skb, we only consumed the
4145  * page */
4146  buffer_info->skb = skb;
4147  skb = rxtop;
4148  rxtop = NULL;
4149  e1000_consume_page(buffer_info, skb, length);
4150  } else {
4151  /* no chain, got EOP, this buf is the packet
4152  * copybreak to save the put_page/alloc_page */
4153  if (length <= copybreak &&
4154  skb_tailroom(skb) >= length) {
4155  u8 *vaddr;
4156  vaddr = kmap_atomic(buffer_info->page);
4157  memcpy(skb_tail_pointer(skb), vaddr, length);
4158  kunmap_atomic(vaddr);
4159  /* re-use the page, so don't erase
4160  * buffer_info->page */
4161  skb_put(skb, length);
4162  } else {
4163  skb_fill_page_desc(skb, 0,
4164  buffer_info->page, 0,
4165  length);
4166  e1000_consume_page(buffer_info, skb,
4167  length);
4168  }
4169  }
4170  }
4171 
4172  /* Receive Checksum Offload XXX recompute due to CRC strip? */
4173  e1000_rx_checksum(adapter,
4174  (u32)(status) |
4175  ((u32)(rx_desc->errors) << 24),
4176  le16_to_cpu(rx_desc->csum), skb);
4177 
4178  total_rx_bytes += (skb->len - 4); /* don't count FCS */
4179  if (likely(!(netdev->features & NETIF_F_RXFCS)))
4180  pskb_trim(skb, skb->len - 4);
4181  total_rx_packets++;
4182 
4183  /* eth type trans needs skb->data to point to something */
4184  if (!pskb_may_pull(skb, ETH_HLEN)) {
4185  e_err(drv, "pskb_may_pull failed.\n");
4186  dev_kfree_skb(skb);
4187  goto next_desc;
4188  }
4189 
4190  e1000_receive_skb(adapter, status, rx_desc->special, skb);
4191 
4192 next_desc:
4193  rx_desc->status = 0;
4194 
4195  /* return some buffers to hardware, one at a time is too slow */
4196  if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4197  adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4198  cleaned_count = 0;
4199  }
4200 
4201  /* use prefetched values */
4202  rx_desc = next_rxd;
4203  buffer_info = next_buffer;
4204  }
4205  rx_ring->next_to_clean = i;
4206 
4207  cleaned_count = E1000_DESC_UNUSED(rx_ring);
4208  if (cleaned_count)
4209  adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4210 
4211  adapter->total_rx_packets += total_rx_packets;
4212  adapter->total_rx_bytes += total_rx_bytes;
4213  netdev->stats.rx_bytes += total_rx_bytes;
4214  netdev->stats.rx_packets += total_rx_packets;
4215  return cleaned;
4216 }
4217 
4218 /*
4219  * this should improve performance for small packets with large amounts
4220  * of reassembly being done in the stack
4221  */
4222 static void e1000_check_copybreak(struct net_device *netdev,
4223  struct e1000_buffer *buffer_info,
4224  u32 length, struct sk_buff **skb)
4225 {
4226  struct sk_buff *new_skb;
4227 
4228  if (length > copybreak)
4229  return;
4230 
4231  new_skb = netdev_alloc_skb_ip_align(netdev, length);
4232  if (!new_skb)
4233  return;
4234 
4235  skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4236  (*skb)->data - NET_IP_ALIGN,
4237  length + NET_IP_ALIGN);
4238  /* save the skb in buffer_info as good */
4239  buffer_info->skb = *skb;
4240  *skb = new_skb;
4241 }
4242 
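/*
 * Sketch (not driver code) of the copybreak idea implemented above: frames
 * no larger than a threshold are copied into a small, freshly allocated
 * buffer so the original (large) receive buffer can be recycled to the ring
 * instead of travelling up the stack.  The threshold value and names are
 * illustrative.
 */
#include <stdlib.h>
#include <string.h>

#define SKETCH_COPYBREAK 256

/* Returns a small copy for short frames, or NULL to keep using the original
 * buffer; *recycled tells the caller whether the original may be reused. */
static void *sketch_copybreak(const void *rx_buf, size_t len, int *recycled)
{
	void *copy;

	*recycled = 0;
	if (len > SKETCH_COPYBREAK)
		return NULL;

	copy = malloc(len);
	if (!copy)
		return NULL;

	memcpy(copy, rx_buf, len);
	*recycled = 1;		/* original buffer goes back to the ring */
	return copy;
}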
4250 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4251  struct e1000_rx_ring *rx_ring,
4252  int *work_done, int work_to_do)
4253 {
4254  struct e1000_hw *hw = &adapter->hw;
4255  struct net_device *netdev = adapter->netdev;
4256  struct pci_dev *pdev = adapter->pdev;
4257  struct e1000_rx_desc *rx_desc, *next_rxd;
4258  struct e1000_buffer *buffer_info, *next_buffer;
4259  unsigned long flags;
4260  u32 length;
4261  unsigned int i;
4262  int cleaned_count = 0;
4263  bool cleaned = false;
4264  unsigned int total_rx_bytes=0, total_rx_packets=0;
4265 
4266  i = rx_ring->next_to_clean;
4267  rx_desc = E1000_RX_DESC(*rx_ring, i);
4268  buffer_info = &rx_ring->buffer_info[i];
4269 
4270  while (rx_desc->status & E1000_RXD_STAT_DD) {
4271  struct sk_buff *skb;
4272  u8 status;
4273 
4274  if (*work_done >= work_to_do)
4275  break;
4276  (*work_done)++;
4277  rmb(); /* read descriptor and rx_buffer_info after status DD */
4278 
4279  status = rx_desc->status;
4280  skb = buffer_info->skb;
4281  buffer_info->skb = NULL;
4282 
4283  prefetch(skb->data - NET_IP_ALIGN);
4284 
4285  if (++i == rx_ring->count) i = 0;
4286  next_rxd = E1000_RX_DESC(*rx_ring, i);
4287  prefetch(next_rxd);
4288 
4289  next_buffer = &rx_ring->buffer_info[i];
4290 
4291  cleaned = true;
4292  cleaned_count++;
4293  dma_unmap_single(&pdev->dev, buffer_info->dma,
4294  buffer_info->length, DMA_FROM_DEVICE);
4295  buffer_info->dma = 0;
4296 
4297  length = le16_to_cpu(rx_desc->length);
4298  /* !EOP means multiple descriptors were used to store a single
4299  * packet, if that's the case we need to toss it. In fact, we need
4300  * to toss every packet with the EOP bit clear and the next
4301  * frame that _does_ have the EOP bit set, as it is by
4302  * definition only a frame fragment
4303  */
4304  if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4305  adapter->discarding = true;
4306 
4307  if (adapter->discarding) {
4308  /* All receives must fit into a single buffer */
4309  e_dbg("Receive packet consumed multiple buffers\n");
4310  /* recycle */
4311  buffer_info->skb = skb;
4312  if (status & E1000_RXD_STAT_EOP)
4313  adapter->discarding = false;
4314  goto next_desc;
4315  }
4316 
4317  if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4318  u8 last_byte = *(skb->data + length - 1);
4319  if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4320  last_byte)) {
4321  spin_lock_irqsave(&adapter->stats_lock, flags);
4322  e1000_tbi_adjust_stats(hw, &adapter->stats,
4323  length, skb->data);
4324  spin_unlock_irqrestore(&adapter->stats_lock,
4325  flags);
4326  length--;
4327  } else {
4328  if (netdev->features & NETIF_F_RXALL)
4329  goto process_skb;
4330  /* recycle */
4331  buffer_info->skb = skb;
4332  goto next_desc;
4333  }
4334  }
4335 
4336 process_skb:
4337  total_rx_bytes += (length - 4); /* don't count FCS */
4338  total_rx_packets++;
4339 
4340  if (likely(!(netdev->features & NETIF_F_RXFCS)))
4341  /* adjust length to remove Ethernet CRC, this must be
4342  * done after the TBI_ACCEPT workaround above
4343  */
4344  length -= 4;
4345 
4346  e1000_check_copybreak(netdev, buffer_info, length, &skb);
4347 
4348  skb_put(skb, length);
4349 
4350  /* Receive Checksum Offload */
4351  e1000_rx_checksum(adapter,
4352  (u32)(status) |
4353  ((u32)(rx_desc->errors) << 24),
4354  le16_to_cpu(rx_desc->csum), skb);
4355 
4356  e1000_receive_skb(adapter, status, rx_desc->special, skb);
4357 
4358 next_desc:
4359  rx_desc->status = 0;
4360 
4361  /* return some buffers to hardware, one at a time is too slow */
4362  if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4363  adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4364  cleaned_count = 0;
4365  }
4366 
4367  /* use prefetched values */
4368  rx_desc = next_rxd;
4369  buffer_info = next_buffer;
4370  }
4371  rx_ring->next_to_clean = i;
4372 
4373  cleaned_count = E1000_DESC_UNUSED(rx_ring);
4374  if (cleaned_count)
4375  adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4376 
4377  adapter->total_rx_packets += total_rx_packets;
4378  adapter->total_rx_bytes += total_rx_bytes;
4379  netdev->stats.rx_bytes += total_rx_bytes;
4380  netdev->stats.rx_packets += total_rx_packets;
4381  return cleaned;
4382 }
4383 
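/*
 * Sketch (not driver code) of the "discarding" state machine used in
 * e1000_clean_rx_irq() above: once a descriptor arrives without EOP, the
 * whole multi-buffer packet is tossed, and dropping continues until the
 * descriptor that finally carries EOP is seen.
 */
#include <stdbool.h>

struct sketch_rx_state {
	bool discarding;
};

/* Returns true if this descriptor's buffer should be dropped/recycled. */
static bool sketch_should_discard(struct sketch_rx_state *st, bool eop)
{
	if (!eop)
		st->discarding = true;

	if (st->discarding) {
		if (eop)
			st->discarding = false;	/* fragment chain ended */
		return true;
	}
	return false;
}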
4391 static void
4392 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4393  struct e1000_rx_ring *rx_ring, int cleaned_count)
4394 {
4395  struct net_device *netdev = adapter->netdev;
4396  struct pci_dev *pdev = adapter->pdev;
4397  struct e1000_rx_desc *rx_desc;
4398  struct e1000_buffer *buffer_info;
4399  struct sk_buff *skb;
4400  unsigned int i;
4401  unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
4402 
4403  i = rx_ring->next_to_use;
4404  buffer_info = &rx_ring->buffer_info[i];
4405 
4406  while (cleaned_count--) {
4407  skb = buffer_info->skb;
4408  if (skb) {
4409  skb_trim(skb, 0);
4410  goto check_page;
4411  }
4412 
4413  skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4414  if (unlikely(!skb)) {
4415  /* Better luck next round */
4416  adapter->alloc_rx_buff_failed++;
4417  break;
4418  }
4419 
4420  buffer_info->skb = skb;
4421  buffer_info->length = adapter->rx_buffer_len;
4422 check_page:
4423  /* allocate a new page if necessary */
4424  if (!buffer_info->page) {
4425  buffer_info->page = alloc_page(GFP_ATOMIC);
4426  if (unlikely(!buffer_info->page)) {
4427  adapter->alloc_rx_buff_failed++;
4428  break;
4429  }
4430  }
4431 
4432  if (!buffer_info->dma) {
4433  buffer_info->dma = dma_map_page(&pdev->dev,
4434  buffer_info->page, 0,
4435  buffer_info->length,
4436  DMA_FROM_DEVICE);
4437  if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4438  put_page(buffer_info->page);
4439  dev_kfree_skb(skb);
4440  buffer_info->page = NULL;
4441  buffer_info->skb = NULL;
4442  buffer_info->dma = 0;
4443  adapter->alloc_rx_buff_failed++;
4444  break; /* while !buffer_info->skb */
4445  }
4446  }
4447 
4448  rx_desc = E1000_RX_DESC(*rx_ring, i);
4449  rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4450 
4451  if (unlikely(++i == rx_ring->count))
4452  i = 0;
4453  buffer_info = &rx_ring->buffer_info[i];
4454  }
4455 
4456  if (likely(rx_ring->next_to_use != i)) {
4457  rx_ring->next_to_use = i;
4458  if (unlikely(i-- == 0))
4459  i = (rx_ring->count - 1);
4460 
4461  /* Force memory writes to complete before letting h/w
4462  * know there are new descriptors to fetch. (Only
4463  * applicable for weak-ordered memory model archs,
4464  * such as IA-64). */
4465  wmb();
4466  writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4467  }
4468 }
4469 
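/*
 * Sketch (not driver code) of the ring bookkeeping used by the allocation
 * routine above (and the non-jumbo one below): the fill index wraps modulo
 * the ring size, and the hardware tail (RDT) is written with the index of
 * the last descriptor actually filled, i.e. one slot before next_to_use.
 */
static unsigned int sketch_ring_advance(unsigned int i, unsigned int count)
{
	return (++i == count) ? 0 : i;
}

static unsigned int sketch_tail_from_next_to_use(unsigned int next_to_use,
						 unsigned int count)
{
	/* the driver's "if (i-- == 0) i = count - 1" is a wrapping decrement */
	return (next_to_use == 0) ? count - 1 : next_to_use - 1;
}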
4475 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4476  struct e1000_rx_ring *rx_ring,
4477  int cleaned_count)
4478 {
4479  struct e1000_hw *hw = &adapter->hw;
4480  struct net_device *netdev = adapter->netdev;
4481  struct pci_dev *pdev = adapter->pdev;
4482  struct e1000_rx_desc *rx_desc;
4483  struct e1000_buffer *buffer_info;
4484  struct sk_buff *skb;
4485  unsigned int i;
4486  unsigned int bufsz = adapter->rx_buffer_len;
4487 
4488  i = rx_ring->next_to_use;
4489  buffer_info = &rx_ring->buffer_info[i];
4490 
4491  while (cleaned_count--) {
4492  skb = buffer_info->skb;
4493  if (skb) {
4494  skb_trim(skb, 0);
4495  goto map_skb;
4496  }
4497 
4498  skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4499  if (unlikely(!skb)) {
4500  /* Better luck next round */
4501  adapter->alloc_rx_buff_failed++;
4502  break;
4503  }
4504 
4505  /* Fix for errata 23, can't cross 64kB boundary */
4506  if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4507  struct sk_buff *oldskb = skb;
4508  e_err(rx_err, "skb align check failed: %u bytes at "
4509  "%p\n", bufsz, skb->data);
4510  /* Try again, without freeing the previous */
4511  skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4512  /* Failed allocation, critical failure */
4513  if (!skb) {
4514  dev_kfree_skb(oldskb);
4515  adapter->alloc_rx_buff_failed++;
4516  break;
4517  }
4518 
4519  if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4520  /* give up */
4521  dev_kfree_skb(skb);
4522  dev_kfree_skb(oldskb);
4523  adapter->alloc_rx_buff_failed++;
4524  break; /* while !buffer_info->skb */
4525  }
4526 
4527  /* Use new allocation */
4528  dev_kfree_skb(oldskb);
4529  }
4530  buffer_info->skb = skb;
4531  buffer_info->length = adapter->rx_buffer_len;
4532 map_skb:
4533  buffer_info->dma = dma_map_single(&pdev->dev,
4534  skb->data,
4535  buffer_info->length,
4536  DMA_FROM_DEVICE);
4537  if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4538  dev_kfree_skb(skb);
4539  buffer_info->skb = NULL;
4540  buffer_info->dma = 0;
4541  adapter->alloc_rx_buff_failed++;
4542  break; /* while !buffer_info->skb */
4543  }
4544 
4545  /*
4546  * XXX if it was allocated cleanly it will never map to a
4547  * boundary crossing
4548  */
4549 
4550  /* Fix for errata 23, can't cross 64kB boundary */
4551  if (!e1000_check_64k_bound(adapter,
4552  (void *)(unsigned long)buffer_info->dma,
4553  adapter->rx_buffer_len)) {
4554  e_err(rx_err, "dma align check failed: %u bytes at "
4555  "%p\n", adapter->rx_buffer_len,
4556  (void *)(unsigned long)buffer_info->dma);
4557  dev_kfree_skb(skb);
4558  buffer_info->skb = NULL;
4559 
4560  dma_unmap_single(&pdev->dev, buffer_info->dma,
4561  adapter->rx_buffer_len,
4562  DMA_FROM_DEVICE);
4563  buffer_info->dma = 0;
4564 
4565  adapter->alloc_rx_buff_failed++;
4566  break; /* while !buffer_info->skb */
4567  }
4568  rx_desc = E1000_RX_DESC(*rx_ring, i);
4569  rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4570 
4571  if (unlikely(++i == rx_ring->count))
4572  i = 0;
4573  buffer_info = &rx_ring->buffer_info[i];
4574  }
4575 
4576  if (likely(rx_ring->next_to_use != i)) {
4577  rx_ring->next_to_use = i;
4578  if (unlikely(i-- == 0))
4579  i = (rx_ring->count - 1);
4580 
4581  /* Force memory writes to complete before letting h/w
4582  * know there are new descriptors to fetch. (Only
4583  * applicable for weak-ordered memory model archs,
4584  * such as IA-64). */
4585  wmb();
4586  writel(i, hw->hw_addr + rx_ring->rdt);
4587  }
4588 }
4589 
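/*
 * Sketch (not driver code), assuming e1000_check_64k_bound() rejects any
 * buffer whose bytes span a 64 KB boundary (the errata 23 workaround above):
 * a buffer of length len starting at addr stays inside one 64 KB window when
 * its first and last bytes share the same 64 KB-aligned base.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_within_64k(uint64_t addr, uint64_t len)
{
	uint64_t start = addr & ~0xFFFFULL;
	uint64_t end   = (addr + len - 1) & ~0xFFFFULL;

	return len != 0 && start == end;
}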
4595 static void e1000_smartspeed(struct e1000_adapter *adapter)
4596 {
4597  struct e1000_hw *hw = &adapter->hw;
4598  u16 phy_status;
4599  u16 phy_ctrl;
4600 
4601  if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4602  !(er32(STATUS) & E1000_STATUS_LU))
4603  return;
4604 
4605  if (adapter->smartspeed == 0) {
4606  /* If Master/Slave config fault is asserted twice,
4607  * we assume back-to-back */
4608  e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4609  if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4610  e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4611  if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4612  e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4613  if (phy_ctrl & CR_1000T_MS_ENABLE) {
4614  phy_ctrl &= ~CR_1000T_MS_ENABLE;
4615  e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4616  phy_ctrl);
4617  adapter->smartspeed++;
4618  if (!e1000_phy_setup_autoneg(hw) &&
4619  !e1000_read_phy_reg(hw, PHY_CTRL,
4620  &phy_ctrl)) {
4621  phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4622  MII_CR_RESTART_AUTO_NEG);
4623  e1000_write_phy_reg(hw, PHY_CTRL,
4624  phy_ctrl);
4625  }
4626  }
4627  return;
4628  } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4629  /* If still no link, perhaps using 2/3 pair cable */
4630  e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4631  phy_ctrl |= CR_1000T_MS_ENABLE;
4632  e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4633  if (!e1000_phy_setup_autoneg(hw) &&
4634  !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4635  phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4636  MII_CR_RESTART_AUTO_NEG);
4637  e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4638  }
4639  }
4640  /* Restart process after E1000_SMARTSPEED_MAX iterations */
4641  if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4642  adapter->smartspeed = 0;
4643 }
4644 
4652 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4653 {
4654  switch (cmd) {
4655  case SIOCGMIIPHY:
4656  case SIOCGMIIREG:
4657  case SIOCSMIIREG:
4658  return e1000_mii_ioctl(netdev, ifr, cmd);
4659  default:
4660  return -EOPNOTSUPP;
4661  }
4662 }
4663 
4671 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4672  int cmd)
4673 {
4674  struct e1000_adapter *adapter = netdev_priv(netdev);
4675  struct e1000_hw *hw = &adapter->hw;
4676  struct mii_ioctl_data *data = if_mii(ifr);
4677  int retval;
4678  u16 mii_reg;
4679  unsigned long flags;
4680 
4681  if (hw->media_type != e1000_media_type_copper)
4682  return -EOPNOTSUPP;
4683 
4684  switch (cmd) {
4685  case SIOCGMIIPHY:
4686  data->phy_id = hw->phy_addr;
4687  break;
4688  case SIOCGMIIREG:
4689  spin_lock_irqsave(&adapter->stats_lock, flags);
4690  if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4691  &data->val_out)) {
4692  spin_unlock_irqrestore(&adapter->stats_lock, flags);
4693  return -EIO;
4694  }
4695  spin_unlock_irqrestore(&adapter->stats_lock, flags);
4696  break;
4697  case SIOCSMIIREG:
4698  if (data->reg_num & ~(0x1F))
4699  return -EFAULT;
4700  mii_reg = data->val_in;
4701  spin_lock_irqsave(&adapter->stats_lock, flags);
4702  if (e1000_write_phy_reg(hw, data->reg_num,
4703  mii_reg)) {
4704  spin_unlock_irqrestore(&adapter->stats_lock, flags);
4705  return -EIO;
4706  }
4707  spin_unlock_irqrestore(&adapter->stats_lock, flags);
4708  if (hw->media_type == e1000_media_type_copper) {
4709  switch (data->reg_num) {
4710  case PHY_CTRL:
4711  if (mii_reg & MII_CR_POWER_DOWN)
4712  break;
4713  if (mii_reg & MII_CR_AUTO_NEG_EN) {
4714  hw->autoneg = 1;
4715  hw->autoneg_advertised = 0x2F;
4716  } else {
4717  u32 speed;
4718  if (mii_reg & 0x40)
4719  speed = SPEED_1000;
4720  else if (mii_reg & 0x2000)
4721  speed = SPEED_100;
4722  else
4723  speed = SPEED_10;
4724  retval = e1000_set_spd_dplx(
4725  adapter, speed,
4726  ((mii_reg & 0x100)
4727  ? DUPLEX_FULL :
4728  DUPLEX_HALF));
4729  if (retval)
4730  return retval;
4731  }
4732  if (netif_running(adapter->netdev))
4733  e1000_reinit_locked(adapter);
4734  else
4735  e1000_reset(adapter);
4736  break;
4737  case M88E1000_PHY_SPEC_CTRL:
4738  case M88E1000_EXT_PHY_SPEC_CTRL:
4739  if (e1000_phy_reset(hw))
4740  return -EIO;
4741  break;
4742  }
4743  } else {
4744  switch (data->reg_num) {
4745  case PHY_CTRL:
4746  if (mii_reg & MII_CR_POWER_DOWN)
4747  break;
4748  if (netif_running(adapter->netdev))
4749  e1000_reinit_locked(adapter);
4750  else
4751  e1000_reset(adapter);
4752  break;
4753  }
4754  }
4755  break;
4756  default:
4757  return -EOPNOTSUPP;
4758  }
4759  return E1000_SUCCESS;
4760 }
4761 
4762 void e1000_pci_set_mwi(struct e1000_hw *hw)
4763 {
4764  struct e1000_adapter *adapter = hw->back;
4765  int ret_val = pci_set_mwi(adapter->pdev);
4766 
4767  if (ret_val)
4768  e_err(probe, "Error in setting MWI\n");
4769 }
4770 
4771 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4772 {
4773  struct e1000_adapter *adapter = hw->back;
4774 
4775  pci_clear_mwi(adapter->pdev);
4776 }
4777 
4778 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4779 {
4780  struct e1000_adapter *adapter = hw->back;
4781  return pcix_get_mmrbc(adapter->pdev);
4782 }
4783 
4784 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4785 {
4786  struct e1000_adapter *adapter = hw->back;
4787  pcix_set_mmrbc(adapter->pdev, mmrbc);
4788 }
4789 
4790 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4791 {
4792  outl(value, port);
4793 }
4794 
4795 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4796 {
4797  u16 vid;
4798 
4799  for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4800  return true;
4801  return false;
4802 }
4803 
4804 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4805  netdev_features_t features)
4806 {
4807  struct e1000_hw *hw = &adapter->hw;
4808  u32 ctrl;
4809 
4810  ctrl = er32(CTRL);
4811  if (features & NETIF_F_HW_VLAN_RX) {
4812  /* enable VLAN tag insert/strip */
4813  ctrl |= E1000_CTRL_VME;
4814  } else {
4815  /* disable VLAN tag insert/strip */
4816  ctrl &= ~E1000_CTRL_VME;
4817  }
4818  ew32(CTRL, ctrl);
4819 }
4820 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4821  bool filter_on)
4822 {
4823  struct e1000_hw *hw = &adapter->hw;
4824  u32 rctl;
4825 
4826  if (!test_bit(__E1000_DOWN, &adapter->flags))
4827  e1000_irq_disable(adapter);
4828 
4829  __e1000_vlan_mode(adapter, adapter->netdev->features);
4830  if (filter_on) {
4831  /* enable VLAN receive filtering */
4832  rctl = er32(RCTL);
4833  rctl &= ~E1000_RCTL_CFIEN;
4834  if (!(adapter->netdev->flags & IFF_PROMISC))
4835  rctl |= E1000_RCTL_VFE;
4836  ew32(RCTL, rctl);
4837  e1000_update_mng_vlan(adapter);
4838  } else {
4839  /* disable VLAN receive filtering */
4840  rctl = er32(RCTL);
4841  rctl &= ~E1000_RCTL_VFE;
4842  ew32(RCTL, rctl);
4843  }
4844 
4845  if (!test_bit(__E1000_DOWN, &adapter->flags))
4846  e1000_irq_enable(adapter);
4847 }
4848 
4849 static void e1000_vlan_mode(struct net_device *netdev,
4850  netdev_features_t features)
4851 {
4852  struct e1000_adapter *adapter = netdev_priv(netdev);
4853 
4854  if (!test_bit(__E1000_DOWN, &adapter->flags))
4855  e1000_irq_disable(adapter);
4856 
4857  __e1000_vlan_mode(adapter, features);
4858 
4859  if (!test_bit(__E1000_DOWN, &adapter->flags))
4860  e1000_irq_enable(adapter);
4861 }
4862 
4863 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4864 {
4865  struct e1000_adapter *adapter = netdev_priv(netdev);
4866  struct e1000_hw *hw = &adapter->hw;
4867  u32 vfta, index;
4868 
4869  if ((hw->mng_cookie.status &
4870  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4871  (vid == adapter->mng_vlan_id))
4872  return 0;
4873 
4874  if (!e1000_vlan_used(adapter))
4875  e1000_vlan_filter_on_off(adapter, true);
4876 
4877  /* add VID to filter table */
4878  index = (vid >> 5) & 0x7F;
4879  vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4880  vfta |= (1 << (vid & 0x1F));
4881  e1000_write_vfta(hw, index, vfta);
4882 
4883  set_bit(vid, adapter->active_vlans);
4884 
4885  return 0;
4886 }
4887 
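/*
 * Sketch (not driver code) of the VLAN filter table math used above: the
 * 4096 possible VIDs map onto 128 32-bit VFTA registers, so bits 11:5 of the
 * VID select the register and bits 4:0 select the bit within it.
 */
#include <stdint.h>

static void sketch_vfta_position(uint16_t vid,
				 uint32_t *index, uint32_t *mask)
{
	*index = (vid >> 5) & 0x7F;	/* which of the 128 VFTA words */
	*mask  = 1u << (vid & 0x1F);	/* which bit inside that word */
}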
4888 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4889 {
4890  struct e1000_adapter *adapter = netdev_priv(netdev);
4891  struct e1000_hw *hw = &adapter->hw;
4892  u32 vfta, index;
4893 
4894  if (!test_bit(__E1000_DOWN, &adapter->flags))
4895  e1000_irq_disable(adapter);
4896  if (!test_bit(__E1000_DOWN, &adapter->flags))
4897  e1000_irq_enable(adapter);
4898 
4899  /* remove VID from filter table */
4900  index = (vid >> 5) & 0x7F;
4901  vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4902  vfta &= ~(1 << (vid & 0x1F));
4903  e1000_write_vfta(hw, index, vfta);
4904 
4905  clear_bit(vid, adapter->active_vlans);
4906 
4907  if (!e1000_vlan_used(adapter))
4908  e1000_vlan_filter_on_off(adapter, false);
4909 
4910  return 0;
4911 }
4912 
4913 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4914 {
4915  u16 vid;
4916 
4917  if (!e1000_vlan_used(adapter))
4918  return;
4919 
4920  e1000_vlan_filter_on_off(adapter, true);
4921  for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4922  e1000_vlan_rx_add_vid(adapter->netdev, vid);
4923 }
4924 
4925 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4926 {
4927  struct e1000_hw *hw = &adapter->hw;
4928 
4929  hw->autoneg = 0;
4930 
4931  /* Make sure dplx is at most 1 bit and lsb of speed is not set
4932  * for the switch() below to work */
4933  if ((spd & 1) || (dplx & ~1))
4934  goto err_inval;
4935 
4936  /* Fiber NICs only allow 1000 Mbps full duplex */
4937  if ((hw->media_type == e1000_media_type_fiber) &&
4938  spd != SPEED_1000 &&
4939  dplx != DUPLEX_FULL)
4940  goto err_inval;
4941 
4942  switch (spd + dplx) {
4943  case SPEED_10 + DUPLEX_HALF:
4944  hw->forced_speed_duplex = e1000_10_half;
4945  break;
4946  case SPEED_10 + DUPLEX_FULL:
4947  hw->forced_speed_duplex = e1000_10_full;
4948  break;
4949  case SPEED_100 + DUPLEX_HALF:
4950  hw->forced_speed_duplex = e1000_100_half;
4951  break;
4952  case SPEED_100 + DUPLEX_FULL:
4953  hw->forced_speed_duplex = e1000_100_full;
4954  break;
4955  case SPEED_1000 + DUPLEX_FULL:
4956  hw->autoneg = 1;
4957  hw->autoneg_advertised = ADVERTISE_1000_FULL;
4958  break;
4959  case SPEED_1000 + DUPLEX_HALF: /* not supported */
4960  default:
4961  goto err_inval;
4962  }
4963 
4964  /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4965  hw->mdix = AUTO_ALL_MODES;
4966 
4967  return 0;
4968 
4969 err_inval:
4970  e_err(probe, "Unsupported Speed/Duplex configuration\n");
4971  return -EINVAL;
4972 }
4973 
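/*
 * Sketch (not driver code) of why the switch above can key on spd + dplx:
 * after the guard rejects odd speeds and any duplex value other than 0 or 1,
 * the six legal combinations produce six distinct sums (10, 11, 100, 101,
 * 1000, 1001), so adding the duplex bit to the speed cannot collide.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int speeds[] = { 10, 100, 1000 };	/* SPEED_* */
	const unsigned int duplexes[] = { 0, 1 };		/* HALF, FULL */
	unsigned int s, d;

	for (s = 0; s < 3; s++)
		for (d = 0; d < 2; d++)
			printf("spd=%u dplx=%u -> key %u\n",
			       speeds[s], duplexes[d],
			       speeds[s] + duplexes[d]);
	return 0;
}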
4974 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4975 {
4976  struct net_device *netdev = pci_get_drvdata(pdev);
4977  struct e1000_adapter *adapter = netdev_priv(netdev);
4978  struct e1000_hw *hw = &adapter->hw;
4979  u32 ctrl, ctrl_ext, rctl, status;
4980  u32 wufc = adapter->wol;
4981 #ifdef CONFIG_PM
4982  int retval = 0;
4983 #endif
4984 
4985  netif_device_detach(netdev);
4986 
4987  if (netif_running(netdev)) {
4988  WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4989  e1000_down(adapter);
4990  }
4991 
4992 #ifdef CONFIG_PM
4993  retval = pci_save_state(pdev);
4994  if (retval)
4995  return retval;
4996 #endif
4997 
4998  status = er32(STATUS);
4999  if (status & E1000_STATUS_LU)
5000  wufc &= ~E1000_WUFC_LNKC;
5001 
5002  if (wufc) {
5003  e1000_setup_rctl(adapter);
5004  e1000_set_rx_mode(netdev);
5005 
5006  rctl = er32(RCTL);
5007 
5008  /* turn on all-multi mode if wake on multicast is enabled */
5009  if (wufc & E1000_WUFC_MC)
5010  rctl |= E1000_RCTL_MPE;
5011 
5012  /* enable receives in the hardware */
5013  ew32(RCTL, rctl | E1000_RCTL_EN);
5014 
5015  if (hw->mac_type >= e1000_82540) {
5016  ctrl = er32(CTRL);
5017  /* advertise wake from D3Cold */
5018  #define E1000_CTRL_ADVD3WUC 0x00100000
5019  /* phy power management enable */
5020  #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5021  ctrl |= E1000_CTRL_ADVD3WUC |
5022  E1000_CTRL_EN_PHY_PWR_MGMT;
5023  ew32(CTRL, ctrl);
5024  }
5025 
5026  if (hw->media_type == e1000_media_type_fiber ||
5027  hw->media_type == e1000_media_type_internal_serdes) {
5028  /* keep the laser running in D3 */
5029  ctrl_ext = er32(CTRL_EXT);
5030  ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5031  ew32(CTRL_EXT, ctrl_ext);
5032  }
5033 
5034  ew32(WUC, E1000_WUC_PME_EN);
5035  ew32(WUFC, wufc);
5036  } else {
5037  ew32(WUC, 0);
5038  ew32(WUFC, 0);
5039  }
5040 
5041  e1000_release_manageability(adapter);
5042 
5043  *enable_wake = !!wufc;
5044 
5045  /* make sure adapter isn't asleep if manageability is enabled */
5046  if (adapter->en_mng_pt)
5047  *enable_wake = true;
5048 
5049  if (netif_running(netdev))
5050  e1000_free_irq(adapter);
5051 
5052  pci_disable_device(pdev);
5053 
5054  return 0;
5055 }
5056 
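/*
 * Sketch (not driver code) of the wake-up decision made in __e1000_shutdown()
 * above: the link-change wake filter is dropped when the link is already up,
 * and wake stays enabled whenever any filter remains or manageability needs
 * the adapter awake.  The filter bit value below is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_WUFC_LNKC 0x00000010	/* link-change wake filter (illustrative) */

static bool sketch_enable_wake(uint32_t wufc, bool link_up, bool mng_enabled)
{
	if (link_up)
		wufc &= ~SKETCH_WUFC_LNKC;

	return wufc != 0 || mng_enabled;
}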
5057 #ifdef CONFIG_PM
5058 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5059 {
5060  int retval;
5061  bool wake;
5062 
5063  retval = __e1000_shutdown(pdev, &wake);
5064  if (retval)
5065  return retval;
5066 
5067  if (wake) {
5068  pci_prepare_to_sleep(pdev);
5069  } else {
5070  pci_wake_from_d3(pdev, false);
5071  pci_set_power_state(pdev, PCI_D3hot);
5072  }
5073 
5074  return 0;
5075 }
5076 
5077 static int e1000_resume(struct pci_dev *pdev)
5078 {
5079  struct net_device *netdev = pci_get_drvdata(pdev);
5080  struct e1000_adapter *adapter = netdev_priv(netdev);
5081  struct e1000_hw *hw = &adapter->hw;
5082  u32 err;
5083 
5084  pci_set_power_state(pdev, PCI_D0);
5085  pci_restore_state(pdev);
5086  pci_save_state(pdev);
5087 
5088  if (adapter->need_ioport)
5089  err = pci_enable_device(pdev);
5090  else
5091  err = pci_enable_device_mem(pdev);
5092  if (err) {
5093  pr_err("Cannot enable PCI device from suspend\n");
5094  return err;
5095  }
5096  pci_set_master(pdev);
5097 
5098  pci_enable_wake(pdev, PCI_D3hot, 0);
5099  pci_enable_wake(pdev, PCI_D3cold, 0);
5100 
5101  if (netif_running(netdev)) {
5102  err = e1000_request_irq(adapter);
5103  if (err)
5104  return err;
5105  }
5106 
5107  e1000_power_up_phy(adapter);
5108  e1000_reset(adapter);
5109  ew32(WUS, ~0);
5110 
5111  e1000_init_manageability(adapter);
5112 
5113  if (netif_running(netdev))
5114  e1000_up(adapter);
5115 
5116  netif_device_attach(netdev);
5117 
5118  return 0;
5119 }
5120 #endif
5121 
5122 static void e1000_shutdown(struct pci_dev *pdev)
5123 {
5124  bool wake;
5125 
5126  __e1000_shutdown(pdev, &wake);
5127 
5128  if (system_state == SYSTEM_POWER_OFF) {
5129  pci_wake_from_d3(pdev, wake);
5130  pci_set_power_state(pdev, PCI_D3hot);
5131  }
5132 }
5133 
5134 #ifdef CONFIG_NET_POLL_CONTROLLER
5135 /*
5136  * Polling 'interrupt' - used by things like netconsole to send skbs
5137  * without having to re-enable interrupts. It's not called while
5138  * the interrupt routine is executing.
5139  */
5140 static void e1000_netpoll(struct net_device *netdev)
5141 {
5142  struct e1000_adapter *adapter = netdev_priv(netdev);
5143 
5144  disable_irq(adapter->pdev->irq);
5145  e1000_intr(adapter->pdev->irq, netdev);
5146  enable_irq(adapter->pdev->irq);
5147 }
5148 #endif
5149 
5158 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5159  pci_channel_state_t state)
5160 {
5161  struct net_device *netdev = pci_get_drvdata(pdev);
5162  struct e1000_adapter *adapter = netdev_priv(netdev);
5163 
5164  netif_device_detach(netdev);
5165 
5166  if (state == pci_channel_io_perm_failure)
5167  return PCI_ERS_RESULT_DISCONNECT;
5168 
5169  if (netif_running(netdev))
5170  e1000_down(adapter);
5171  pci_disable_device(pdev);
5172 
5173  /* Request a slot reset. */
5174  return PCI_ERS_RESULT_NEED_RESET;
5175 }
5176 
5184 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5185 {
5186  struct net_device *netdev = pci_get_drvdata(pdev);
5187  struct e1000_adapter *adapter = netdev_priv(netdev);
5188  struct e1000_hw *hw = &adapter->hw;
5189  int err;
5190 
5191  if (adapter->need_ioport)
5192  err = pci_enable_device(pdev);
5193  else
5194  err = pci_enable_device_mem(pdev);
5195  if (err) {
5196  pr_err("Cannot re-enable PCI device after reset.\n");
5197  return PCI_ERS_RESULT_DISCONNECT;
5198  }
5199  pci_set_master(pdev);
5200 
5201  pci_enable_wake(pdev, PCI_D3hot, 0);
5202  pci_enable_wake(pdev, PCI_D3cold, 0);
5203 
5204  e1000_reset(adapter);
5205  ew32(WUS, ~0);
5206 
5207  return PCI_ERS_RESULT_RECOVERED;
5208 }
5209 
5218 static void e1000_io_resume(struct pci_dev *pdev)
5219 {
5220  struct net_device *netdev = pci_get_drvdata(pdev);
5221  struct e1000_adapter *adapter = netdev_priv(netdev);
5222 
5223  e1000_init_manageability(adapter);
5224 
5225  if (netif_running(netdev)) {
5226  if (e1000_up(adapter)) {
5227  pr_info("can't bring device back up after reset\n");
5228  return;
5229  }
5230  }
5231 
5232  netif_device_attach(netdev);
5233 }
5234 
5235 /* e1000_main.c */