/*******************************************************************************

  Intel PRO/100 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <[email protected]>
  e1000-devel Mailing List <[email protected]>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/*
 * e100.c: Intel(R) PRO/100 ethernet driver
 *
 * (Re)written 2003 by [email protected].  Based loosely on
 * original e100 driver, but better described as a munging of
 * e100, e1000, eepro100, tg3, 8139cp, and other drivers.
 *
 * References:
 * Intel 8255x 10/100 Mbps Ethernet Controller Family,
 * Open Source Software Developers Manual,
 * http://sourceforge.net/projects/e1000
 *
 *
 * Theory of Operation
 *
 * I. General
 *
 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 * controller family, which includes the 82557, 82558, 82559, 82550,
 * 82551, and 82562 devices.  82558 and greater controllers
 * integrate the Intel 82555 PHY.  The controllers are used in
 * server and client network interface cards, as well as in
 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 * configurations.  8255x supports a 32-bit linear addressing
 * mode and operates at a 33 MHz PCI clock rate.
 *
 * II. Driver Operation
 *
 * Memory-mapped mode is used exclusively to access the device's
 * shared-memory structure, the Control/Status Registers (CSR).  All
 * setup, configuration, and control of the device, including queuing
 * of Tx, Rx, and configuration commands, is done through the CSR.
 * cmd_lock serializes accesses to the CSR command register.  cb_lock
 * protects the shared Command Block List (CBL).
 *
 * 8255x is highly MII-compliant and all accesses to the PHY go
 * through the Management Data Interface (MDI).  Consequently, the
 * driver leverages the mii.c library shared with other MII-compliant
 * devices.
 *
 * Big- and Little-Endian byte order as well as 32- and 64-bit
 * archs are supported.  Weak-ordered memory and non-cache-coherent
 * archs are supported.
 *
 * III. Transmit
 *
 * A Tx skb is mapped and hangs off of a TCB.  TCBs are linked
 * together in a fixed-size ring (CBL), thus forming the flexible mode
 * memory structure.  A TCB marked with the suspend-bit indicates
 * the end of the ring.  The last TCB processed suspends the
 * controller, and the controller can be restarted by issuing a CU
 * resume command to continue from the suspend point, or a CU start
 * command to start at a given position in the ring.
 *
 * Non-Tx commands (config, multicast setup, etc) are linked
 * into the CBL ring along with Tx commands.  The common structure
 * used for both Tx and non-Tx commands is the Command Block (CB).
 *
 * cb_to_use is the next CB to use for queuing a command; cb_to_clean
 * is the next CB to check for completion; cb_to_send is the first
 * CB to start on in case of a previous failure to resume.  CB clean
 * up happens in interrupt context in response to a CU interrupt.
 * cbs_avail keeps track of the number of free CB resources available.
 *
 * Hardware padding of short packets to minimum packet size is
 * enabled.  82557 pads with 7Eh, while the later controllers pad
 * with 00h.
 *
 * IV. Receive
 *
 * The Receive Frame Area (RFA) comprises a ring of Receive Frame
 * Descriptors (RFD) + data buffer, thus forming the simplified mode
 * memory structure.  Rx skbs are allocated to contain both the RFD
 * and the data buffer, but the RFD is pulled off before the skb is
 * indicated.  The data buffer is aligned such that encapsulated
 * protocol headers are u32-aligned.  Since the RFD is part of the
 * mapped shared memory, and completion status is contained within
 * the RFD, the RFD must be dma_sync'ed to maintain a consistent
 * view from software and hardware.
 *
 * In order to keep updates to the RFD link field from colliding with
 * hardware writes to mark packets complete, we use the feature that
 * hardware will not write to a size 0 descriptor, and mark the previous
 * packet as end-of-list (EL).  After updating the link, we remove EL
 * and only then restore the size such that hardware may use the
 * previous-to-end RFD.
 *
 * Under typical operation, the receive unit (RU) is started once,
 * and the controller happily fills RFDs as frames arrive.  If
 * replacement RFDs cannot be allocated, or the RU goes non-active,
 * the RU must be restarted.  Frame arrival generates an interrupt,
 * and Rx indication and re-allocation happen in the same context,
 * therefore no locking is required.  A software-generated interrupt
 * is generated from the watchdog to recover from a failed allocation
 * scenario where all Rx resources have been indicated and none
 * replaced.
 *
 * V. Miscellaneous
 *
 * VLAN offloading of tagging, stripping and filtering is not
 * supported, but the driver will accommodate the extra 4-byte VLAN tag
 * for processing by upper layers.  Tx/Rx Checksum offloading is not
 * supported.  Tx Scatter/Gather is not supported.  Jumbo Frames are
 * not supported (hardware limitation).
 *
 * MagicPacket(tm) WoL support is enabled/disabled via ethtool.
 *
 * Thanks to JC ([email protected]) for helping with
 * testing/troubleshooting the development driver.
 *
 * TODO:
 * o several entry points race with dev->close
 * o check for tx-no-resources/stop Q races with tx clean/wake Q
 *
 * FIXES:
 * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
 *	- Stratus87247: protect MDI control register manipulations
 * 2009/06/01 - Andreas Mohr <andi at lisas dot de>
 *	- add clean lowlevel I/O emulation for cards with MII-lacking PHYs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include <asm/unaligned.h>


#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
#define DRV_VERSION		"3.5.24-k2" DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"

#define E100_WATCHDOG_PERIOD	(2 * HZ)
#define E100_NAPI_WEIGHT	16

#define FIRMWARE_D101M		"e100/d101m_ucode.bin"
#define FIRMWARE_D101S		"e100/d101s_ucode.bin"
#define FIRMWARE_D102E		"e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

static int debug = 3;
static int eeprom_bad_csum_allow = 0;
static int use_io = 0;
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
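/* Example usage (hypothetical values): load with maximum debug verbosity,
 * forcing I/O-mapped access and tolerating a corrupt EEPROM checksum:
 *
 *	modprobe e100 debug=16 use_io=1 eeprom_bad_csum_allow=1
 */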

#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};

/* CSR (Control/Status Registers) */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;
		u8 cmd_lo;
		u8 cmd_hi;
		u32 gen_ptr;
	} scb;
	u32 port;
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;
	u32 rx_dma_count;
};

enum scb_status {
	rus_no_res = 0x08,
	rus_ready  = 0x10,
	rus_mask   = 0x3C,
};

enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING = 1,
	RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

enum eeprom_ctrl_lo {
	eesk = 0x01,
	eecs = 0x02,
	eedi = 0x04,
	eedo = 0x08,
};

enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,
	op_ewen  = 0x13,
};

enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};

/*
 * cb_command - Command Block flags
 * @cb_tx_nc:  0: controller does CRC (normal),  1: CRC from skb memory
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,
	cb_i      = 0x2000,
	cb_s      = 0x4000,
	cb_el     = 0x8000,
};

struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;
	__le32 rbd;
	__le16 actual_size;
	__le16 size;
};

struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};

#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
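/* Example: with little-endian bitfields, X(byte_count:6, pad0:2) expands to
 * "byte_count:6, pad0:2"; with big-endian bitfields the two declarators are
 * swapped, so each field lands in the same bit positions within the byte. */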
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};

#define E100_MAX_MULTICAST_ADDRS	64
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN];
};

/* Important: keep total struct u32-aligned */
#define UCODE_SIZE			134
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;
	union {
		u8 iaaddr[ETH_ALEN];
		__le32 ucode[UCODE_SIZE];
		struct config config;
		struct multi multi;
		struct {
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;
	} u;
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;
};

struct mem {
	struct {
		u32 signature;
		u32 result;
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;
	struct param_range cbs;
};

struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;
	enum ru_state ru_running;

	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;
	enum scb_cmd_lo cuc_cmd;
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;
	/* End: frequently used values: keep adjacent for cache effect */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;
	dma_addr_t dma_addr;

	struct pci_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;
	__le16 eeprom[256];
	spinlock_t mdio_lock;
	const struct firmware *fw;
};
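
/* Locking summary (see Theory of Operation above): cmd_lock serializes
 * accesses to the CSR command register, cb_lock protects the shared CBL,
 * and mdio_lock guards the multi-step MDI control register sequence
 * (the Stratus87247 fix). */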

static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes through intermediate bridges
	 * by doing a benign read */
	(void)ioread8(&nic->csr->scb.status);
}

static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}

static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into idle with a selective reset to get
	 * device off of PCI bus */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Now fully reset device */
	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}

static int e100_self_test(struct nic *nic)
{
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	/* Passing the self-test is a pretty good indication
	 * that the device can DMA to/from host memory */

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

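/* Bit-bang a word to the EEPROM: three serial commands (write/erase enable,
 * the write itself, write/erase disable) are each clocked out MSB-first on
 * EEDI, pulsing EESK per bit while EECS selects the part. */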
static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
{
	u32 cmd_addr_data[3];
	u8 ctrl;
	int i, j;

	/* Three cmds: write/erase enable, write data, write/erase disable */
	cmd_addr_data[0] = op_ewen << (addr_len - 2);
	cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
		le16_to_cpu(data);
	cmd_addr_data[2] = op_ewds << (addr_len - 2);

	/* Bit-bang cmds to write word to eeprom */
	for (j = 0; j < 3; j++) {

		/* Chip select */
		iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		for (i = 31; i >= 0; i--) {
			ctrl = (cmd_addr_data[j] & (1 << i)) ?
				eecs | eedi : eecs;
			iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);

			iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
			e100_write_flush(nic); udelay(4);
		}
		/* Wait 10 msec for cmd to complete */
		msleep(10);

		/* Chip deselect */
		iowrite8(0, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);
	}
}

/* General technique stolen from the eepro100 driver - very clever */
static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
{
	u32 cmd_addr_data;
	u16 data = 0;
	u8 ctrl;
	int i;

	cmd_addr_data = ((op_read << *addr_len) | addr) << 16;

	/* Chip select */
	iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	/* Bit-bang to read word from eeprom */
	for (i = 31; i >= 0; i--) {
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
		if (!(ctrl & eedo) && i > 16) {
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}

	/* Chip deselect */
	iowrite8(0, &nic->csr->eeprom_ctrl_lo);
	e100_write_flush(nic); udelay(4);

	return cpu_to_le16(data);
}

/* Load entire EEPROM image into driver cache and validate checksum */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
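/* Example of the checksum invariant: if the 16-bit words 0 .. wc-2 sum to
 * 0x1234, the last word must hold 0xBABA - 0x1234 = 0xA886 so that the
 * whole image sums to 0xBABA. */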

/* Save (portion of) driver EEPROM cache to device and update checksum */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}

#define E100_WAIT_SCB_TIMEOUT	20000 /* we might have to wait 100ms!!! */
#define E100_WAIT_SCB_FAST	20    /* delay like the old code */
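/* Worst case: after the first E100_WAIT_SCB_FAST polls, each remaining
 * iteration adds a 5 us delay, i.e. roughly 20000 * 5 us = 100 ms total. */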
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}

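/* Queue a command on the CBL: grab the next free CB under cb_lock, let
 * cb_prepare() fill it in, set the suspend bit on the new tail before
 * clearing it on the previous one (ordering matters to avoid racing the
 * controller), then kick the CU with a start or resume as appropriate. */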
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	cb_prepare(nic, cb, skb);

	/* Order is important otherwise we'll be in a race with h/w:
	 * set S-bit in current first, then clear S-bit in previous. */
	cb->command |= cpu_to_le16(cb_s);
	wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Ok, here's where things get sticky.  It's
			 * possible that we can't schedule the command
			 * because the controller is too busy, so
			 * let's just queue the command and try again
			 * when another command is scheduled. */
			if (err == -ENOSPC) {
				/* request a reset */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}

static int mdio_read(struct net_device *netdev, int addr, int reg)
{
	struct nic *nic = netdev_priv(netdev);
	return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
}

static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
{
	struct nic *nic = netdev_priv(netdev);

	nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
}

/* the standard mdio_ctrl() function for usual MII-compliant hardware */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;


	/*
	 * Stratus87247: we shouldn't be writing the MDI control
	 * register until the Ready bit shows True.  Also, since
	 * manipulation of the MDI control registers is a multi-step
	 * procedure it should be done under lock.
	 */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;	/* No way to indicate timeout error */
	}
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}

/* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
					       MII_ADVERTISE);

			/*
			 * Workaround Si issue where sometimes the part will not
			 * autoneg to 100Mbps even when advertised.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}

/* Fully software-emulated mdio_ctrl() function for cards without
 * MII-compliant PHYs.
 * For now, this is mainly geared towards 80c24 support; in case of further
 * requirements for other types (i82503, ...?) either extend this mechanism
 * or split it, whichever is cleaner.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	/* might need to allocate a netdev_priv'ed register array eventually
	 * to be able to record state changes, but for now
	 * some fully hardcoded register handling ought to be ok I guess. */

	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, right? */
			return BMCR_ANENABLE |
			       BMCR_FULLDPLX;
		case MII_BMSR:
			return BMSR_LSTATUS /* for mii_link_ok() */ |
			       BMSR_ANEGCAPABLE |
			       BMSR_10FULL;
		case MII_ADVERTISE:
			/* 80c24 is a "combo card" PHY, right? */
			return ADVERTISE_10HALF |
			       ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}

static inline int e100_phy_supports_mii(struct nic *nic)
{
	/* for now, just check it by comparing whether we
	 * are using MII software emulation.
	 */
	return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
}

static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* no interrupt for every tx completion, delay = 256us if not 557 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for a freshly allocated RFD */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}

static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=pass */
	config->tx_underrun_retry = 0x3;	/* # of underrun retries */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=i82503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1;		/* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
		     c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
}

/*************************************************************************
*  CPUSaver parameters
*
*  All CPUSaver parameters are 16-bit literals that are part of a
*  "move immediate value" instruction.  By changing the value of
*  the literal in the instruction before the code is loaded, the
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*    When this timer expires the interrupt is asserted, and the
*    timer is reset each time a new packet is received.  (see
*    BUNDLEMAX below to set the limit on number of chained packets)
*    The current default is 0x600 or 1536.  Experiments show that
*    the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*    This sets the maximum number of frames that will be bundled.  In
*    some situations, such as the TCP windowing algorithm, it may be
*    better to limit the growth of the bundle size than let it go as
*    high as it can, because that could cause too much added latency.
*    The default is six, because this is the number of packets in the
*    default TCP window size.  A value of 1 would make CPUSaver indicate
*    an interrupt for every frame received.  If you do not want to put
*    a limit on the bundle size, set this value to 0xFFFF.
*
*  BUNDLESMALL -
*    This contains a bit-mask describing the minimum size frame that
*    will be bundled.  The default masks the lower 7 bits, which means
*    that any frame less than 128 bytes in length will not be bundled,
*    but will instead immediately generate an interrupt.  This does
*    not affect the current bundle in any way.  Any frame that is 128
*    bytes or larger will be bundled normally.  This feature is meant
*    to provide immediate indication of ACK frames in a TCP environment.
*    Customers were seeing poor performance when a machine with CPUSaver
*    enabled was sending but not receiving.  The delay introduced when
*    the ACKs were received was enough to reduce total throughput, because
*    the sender would sit idle until the ACK was finally seen.
*
*    The current default is 0xFF80, which masks out the lower 7 bits.
*    This means that any frame which is 0x7F (127) bytes or smaller
*    will cause an immediate interrupt.  Because this value must be a
*    bit mask, there are only a few valid values that can be used.  To
*    turn this feature off, the driver can write the value 0xFFFF to the
*    lower word of this instruction (in the same way that the other
*    parameters are used).  Likewise, a value of 0xF800 (2047) would
*    cause an interrupt to be generated for every frame, because all
*    standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
 * INTDELAY 1
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536 /* 0x600 */

/* Initialize firmware */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Search for ucode match against h/w revision
	 *
	 * Based on comments in the source code for the FreeBSD fxp
	 * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
	 *
	 *    "fixes for bugs in the B-step hardware (specifically, bugs
	 *     with Inline Receive)."
	 *
	 * So we must fail if it cannot be loaded.
	 *
	 * The other microcode files are only required for the optional
	 * CPUSaver feature.  Nice to have, but no reason to fail.
	 */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else { /* No ucode on other devices */
		return NULL;
	}

	/* If the firmware has not previously been loaded, request a pointer
	 * to it.  If it was previously loaded, we are reinitializing the
	 * adapter, possibly in a resume from hibernate, in which case
	 * request_firmware() cannot be used.
	 */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
	   indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* OK, firmware is validated and ready to use.  Save a pointer
	 * to it in the nic */
	nic->fw = fw;
	return fw;
}

static void e100_setup_ucode(struct nic *nic, struct cb *cb,
			     struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	   will pass it through to here... */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in cb->u.ucode */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
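	/* Each tunable patches only the low 16-bit immediate of a "move
	 * immediate" microcode instruction (see the CPUSaver notes above);
	 * the word offsets come from the last three bytes of the blob. */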

	cb->command = cpu_to_le16(cb_ucode | cb_el);
}

static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (!fw || IS_ERR(fw))
		return PTR_ERR(fw);

	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter)
			break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}

static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_iaaddr);
	memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
}

static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
}

static int e100_phy_check_without_mii(struct nic *nic)
{
	u8 phy_type;
	int without_mii;

	phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;

	switch (phy_type) {
	case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
	case I82503:    /* Non-MII PHY; UNTESTED! */
	case S80C24:    /* Non-MII PHY; tested and working */
		/* paragraph from the FreeBSD driver, "FXP_PHY_80C24":
		 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
		 * doesn't have a programming interface of any sort.  The
		 * media is sensed automatically based on how the link partner
		 * is configured.  This is, in essence, manual configuration.
		 */
		netif_info(nic, probe, nic->netdev,
			   "found MII-less i82503 or 80c24 or other PHY\n");

		nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
		nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */

		/* these might be needed for certain MII-less cards...
		 * nic->flags |= ich;
		 * nic->flags |= ich_10h_workaround; */

		without_mii = 1;
		break;
	default:
		without_mii = 0;
		break;
	}
	return without_mii;
}

#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
#define ADVERTISE_FC_SUPPORTED	0x0400
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * weird, rare variant which is *known* to not have any MII.
		 * But do this AFTER MII checking only, since this does
		 * lookup of EEPROM values which may easily be unreliable. */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the phy and isolate the rest */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/*
	 * Workaround for 82552:
	 * Clear the ISOLATE bit on selected phy_id last (mirrored on all
	 * other phy_id's) using bmcr value from addr discovery loop above.
	 */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		/* Disable congestion control */
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching. */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}

static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if (!in_interrupt() && (err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	e100_disable_irq(nic);

	return 0;
}

static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct net_device *netdev = nic->netdev;
	struct netdev_hw_addr *ha;
	u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);

	cb->command = cpu_to_le16(cb_multi);
	cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == count)
			break;
		memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
			ETH_ALEN);
	}
}

static void e100_set_multicast_list(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "mc_count=%d, flags=0x%04X\n",
		     netdev_mc_count(netdev), netdev->flags);

	if (netdev->flags & IFF_PROMISC)
		nic->flags |= promiscuous;
	else
		nic->flags &= ~promiscuous;

	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
		nic->flags |= multicast_all;
	else
		nic->flags &= ~multicast_all;

	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
}

static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}


	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
{
	/* Adjust inter-frame-spacing (IFS) between two transmits if
	 * we're getting collisions on a half-duplex connection. */

	if (duplex == DUPLEX_HALF) {
		u32 prev = nic->adaptive_ifs;
		u32 min_frames = (speed == SPEED_100) ? 1000 : 100;

		if ((nic->tx_frames / 32 < nic->tx_collisions) &&
		    (nic->tx_frames > min_frames)) {
			if (nic->adaptive_ifs < 60)
				nic->adaptive_ifs += 5;
		} else if (nic->tx_frames < min_frames) {
			if (nic->adaptive_ifs >= 5)
				nic->adaptive_ifs -= 5;
		}
		if (nic->adaptive_ifs != prev)
			e100_exec_cb(nic, NULL, e100_configure);
	}
}

static void e100_watchdog(unsigned long data)
{
	struct nic *nic = (struct nic *)data;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,
		 &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to work around a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}

static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
	struct sk_buff *skb)
{
	cb->command = nic->tx_command;

	/*
	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
	 * testing, ie sending frames with bad CRC.
	 */
	if (unlikely(skb->no_fcs))
		cb->command |= cpu_to_le16(cb_tx_nc);
	else
		cb->command &= ~cpu_to_le16(cb_tx_nc);

	/* interrupt every 16 packets regardless of delay */
	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
		cb->command |= cpu_to_le16(cb_i);
	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
	cb->u.tcb.tcb_byte_count = 0;
	cb->u.tcb.threshold = nic->tx_threshold;
	cb->u.tcb.tbd_count = 1;
	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
		skb->data, skb->len, PCI_DMA_TODEVICE));
	/* check for mapping failure? */
	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
	skb_tx_timestamp(skb);
}

static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		   Issue a NOP command followed by a 1us delay before
		   issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

1823 static int e100_tx_clean(struct nic *nic)
1824 {
1825  struct net_device *dev = nic->netdev;
1826  struct cb *cb;
1827  int tx_cleaned = 0;
1828 
1829  spin_lock(&nic->cb_lock);
1830 
1831  /* Clean CBs marked complete */
1832  for (cb = nic->cb_to_clean;
1833  cb->status & cpu_to_le16(cb_complete);
1834  cb = nic->cb_to_clean = cb->next) {
1835  rmb(); /* read skb after status */
1836  netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1837  "cb[%d]->status = 0x%04X\n",
1838  (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1839  cb->status);
1840 
1841  if (likely(cb->skb != NULL)) {
1842  dev->stats.tx_packets++;
1843  dev->stats.tx_bytes += cb->skb->len;
1844 
1845  pci_unmap_single(nic->pdev,
1846  le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1847  le16_to_cpu(cb->u.tcb.tbd.size),
1848  PCI_DMA_TODEVICE);
1849  dev_kfree_skb_any(cb->skb);
1850  cb->skb = NULL;
1851  tx_cleaned = 1;
1852  }
1853  cb->status = 0;
1854  nic->cbs_avail++;
1855  }
1856 
1857  spin_unlock(&nic->cb_lock);
1858 
1859  /* Recover from running out of Tx resources in xmit_frame */
1860  if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
1861  netif_wake_queue(nic->netdev);
1862 
1863  return tx_cleaned;
1864 }
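
The debug print in e100_tx_clean recovers a CB's ring index by dividing a byte-pointer difference by sizeof(struct cb); plain element subtraction computes the same index. A trivial userspace illustration, with struct cb_demo as a hypothetical stand-in:

	#include <stdio.h>

	struct cb_demo { int status; };	/* hypothetical stand-in for struct cb */

	int main(void)
	{
		struct cb_demo ring[8];
		struct cb_demo *cb = &ring[5];

		/* Byte difference divided by the element size... */
		printf("index = %ld\n",
		       (long)(((char *)cb - (char *)ring) / sizeof(struct cb_demo)));
		/* ...equals plain pointer (element) subtraction. */
		printf("index = %ld\n", (long)(cb - ring));
		return 0;
	}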
1865 
1866 static void e100_clean_cbs(struct nic *nic)
1867 {
1868  if (nic->cbs) {
1869  while (nic->cbs_avail != nic->params.cbs.count) {
1870  struct cb *cb = nic->cb_to_clean;
1871  if (cb->skb) {
1872  pci_unmap_single(nic->pdev,
1873  le32_to_cpu(cb->u.tcb.tbd.buf_addr),
1874  le16_to_cpu(cb->u.tcb.tbd.size),
1875  PCI_DMA_TODEVICE);
1876  dev_kfree_skb(cb->skb);
1877  }
1878  nic->cb_to_clean = nic->cb_to_clean->next;
1879  nic->cbs_avail++;
1880  }
1881  pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1882  nic->cbs = NULL;
1883  nic->cbs_avail = 0;
1884  }
1885  nic->cuc_cmd = cuc_start;
1886  nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
1887  nic->cbs;
1888 }
1889 
1890 static int e100_alloc_cbs(struct nic *nic)
1891 {
1892  struct cb *cb;
1893  unsigned int i, count = nic->params.cbs.count;
1894 
1895  nic->cuc_cmd = cuc_start;
1896  nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1897  nic->cbs_avail = 0;
1898 
1899  nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1900  &nic->cbs_dma_addr);
1901  if (!nic->cbs)
1902  return -ENOMEM;
1903  memset(nic->cbs, 0, count * sizeof(struct cb));
1904 
1905  for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
1906  cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
1907  cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;
1908 
1909  cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
1910  cb->link = cpu_to_le32(nic->cbs_dma_addr +
1911  ((i+1) % count) * sizeof(struct cb));
1912  }
1913 
1914  nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
1915  nic->cbs_avail = count;
1916 
1917  return 0;
1918 }
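
e100_alloc_cbs builds the CBL as a circular ring: next/prev pointers wrap around, and each element's link field holds the bus address of its successor, computed modulo count. A minimal userspace model of the same construction, with a made-up base address standing in for cbs_dma_addr:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	/* "link" plays the role of the DMA'd successor address in each CB. */
	struct ring_cb {
		struct ring_cb *next, *prev;
		uint32_t link;
	};

	int main(void)
	{
		enum { COUNT = 4, CB_SIZE = 64 };
		uint32_t base = 0x10000000;	/* made-up stand-in for cbs_dma_addr */
		struct ring_cb *cbs = calloc(COUNT, sizeof(*cbs));
		struct ring_cb *cb;
		unsigned int i;

		if (!cbs)
			return 1;
		for (i = 0; i < COUNT; i++) {
			cbs[i].next = (i + 1 < COUNT) ? &cbs[i + 1] : &cbs[0];
			cbs[i].prev = (i == 0) ? &cbs[COUNT - 1] : &cbs[i - 1];
			cbs[i].link = base + ((i + 1) % COUNT) * CB_SIZE;
		}
		/* COUNT hops from any element wrap back to it. */
		for (cb = &cbs[0], i = 0; i < COUNT; i++)
			cb = cb->next;
		printf("wrapped=%d last link=0x%08x\n",
		       cb == &cbs[0], (unsigned)cbs[COUNT - 1].link);
		free(cbs);
		return 0;
	}

The last element's link points back at base, which is what lets the suspend bit, rather than a NULL link, terminate the ring.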
1919 
1920 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1921 {
1922  if (!nic->rxs) return;
1923  if (RU_SUSPENDED != nic->ru_running) return;
1924 
1925  /* handle init time starts */
1926  if (!rx) rx = nic->rxs;
1927 
1928  /* (Re)start RU if suspended or idle and RFA is non-NULL */
1929  if (rx->skb) {
1930  e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1931  nic->ru_running = RU_RUNNING;
1932  }
1933 }
1934 
1935 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
1936 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1937 {
1938  if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1939  return -ENOMEM;
1940 
1941  /* Init, and map the RFD. */
1942  skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1942  rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1943  RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
1945 
1946  if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
1947  dev_kfree_skb_any(rx->skb);
1948  rx->skb = NULL;
1949  rx->dma_addr = 0;
1950  return -ENOMEM;
1951  }
1952 
1953  /* Link the RFD to end of RFA by linking previous RFD to
1954  * this one. We are safe to touch the previous RFD because
1955  * it is protected by the before last buffer's el bit being set */
1956  if (rx->prev->skb) {
1957  struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
1958  put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
1959  pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
1960  sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1961  }
1962 
1963  return 0;
1964 }
1965 
1966 static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1967  unsigned int *work_done, unsigned int work_to_do)
1968 {
1969  struct net_device *dev = nic->netdev;
1970  struct sk_buff *skb = rx->skb;
1971  struct rfd *rfd = (struct rfd *)skb->data;
1972  u16 rfd_status, actual_size;
1973  u16 fcs_pad = 0;
1974 
1975  if (unlikely(work_done && *work_done >= work_to_do))
1976  return -EAGAIN;
1977 
1978  /* Need to sync before taking a peek at cb_complete bit */
1979  pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
1980  sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1981  rfd_status = le16_to_cpu(rfd->status);
1982 
1983  netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1984  "status=0x%04X\n", rfd_status);
1985  rmb(); /* read size after status bit */
1986 
1987  /* If data isn't ready, nothing to indicate */
1988  if (unlikely(!(rfd_status & cb_complete))) {
1989  /* If the next buffer has the el bit, but we think the receiver
1990  * is still running, check to see if it really stopped while
1991  * we had interrupts off.
1992  * This allows for a fast restart without re-enabling
1993  * interrupts */
1994  if ((le16_to_cpu(rfd->command) & cb_el) &&
1995  (RU_RUNNING == nic->ru_running))
1996 
1997  if (ioread8(&nic->csr->scb.status) & rus_no_res)
1998  nic->ru_running = RU_SUSPENDED;
1999  pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2000  sizeof(struct rfd),
2001  PCI_DMA_BIDIRECTIONAL);
2002  return -ENODATA;
2003  }
2004 
2005  /* Get actual data size */
2006  if (unlikely(dev->features & NETIF_F_RXFCS))
2007  fcs_pad = 4;
2008  actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
2009  if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
2010  actual_size = RFD_BUF_LEN - sizeof(struct rfd);
2011 
2012  /* Get data */
2013  pci_unmap_single(nic->pdev, rx->dma_addr,
2014  RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2015 
2016  /* If this buffer has the el bit, but we think the receiver
2017  * is still running, check to see if it really stopped while
2018  * we had interrupts off.
2019  * This allows for a fast restart without re-enabling interrupts.
2020  * This can happen when the RU sees the size change but also sees
2021  * the el bit set. */
2022  if ((le16_to_cpu(rfd->command) & cb_el) &&
2023  (RU_RUNNING == nic->ru_running)) {
2024 
2025  if (ioread8(&nic->csr->scb.status) & rus_no_res)
2026  nic->ru_running = RU_SUSPENDED;
2027  }
2028 
2029  /* Pull off the RFD and put the actual data (minus eth hdr) */
2030  skb_reserve(skb, sizeof(struct rfd));
2031  skb_put(skb, actual_size);
2032  skb->protocol = eth_type_trans(skb, nic->netdev);
2033 
2034  /* If we are receiving all frames, then don't bother
2035  * checking for errors.
2036  */
2037  if (unlikely(dev->features & NETIF_F_RXALL)) {
2038  if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
2039  /* Received oversized frame, but keep it. */
2040  nic->rx_over_length_errors++;
2041  goto process_skb;
2042  }
2043 
2044  if (unlikely(!(rfd_status & cb_ok))) {
2045  /* Don't indicate if hardware indicates errors */
2046  dev_kfree_skb_any(skb);
2047  } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
2048  /* Don't indicate oversized frames */
2049  nic->rx_over_length_errors++;
2050  dev_kfree_skb_any(skb);
2051  } else {
2052 process_skb:
2053  dev->stats.rx_packets++;
2054  dev->stats.rx_bytes += (actual_size - fcs_pad);
2055  netif_receive_skb(skb);
2056  if (work_done)
2057  (*work_done)++;
2058  }
2059 
2060  rx->skb = NULL;
2061 
2062  return 0;
2063 }
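
The mask applied to rfd->actual_size above reflects the RFD layout: the low 14 bits carry the received byte count, while the top bits are flags (EOF and F on 8255x parts). A one-line demonstration with an illustrative raw value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* 0xC040 is illustrative: both flag bits set, 64 bytes received. */
		uint16_t raw = 0xC040;

		printf("actual_size = %u\n", (unsigned)(raw & 0x3FFF));
		return 0;
	}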
2064 
2065 static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
2066  unsigned int work_to_do)
2067 {
2068  struct rx *rx;
2069  int restart_required = 0, err = 0;
2070  struct rx *old_before_last_rx, *new_before_last_rx;
2071  struct rfd *old_before_last_rfd, *new_before_last_rfd;
2072 
2073  /* Indicate newly arrived packets */
2074  for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
2075  err = e100_rx_indicate(nic, rx, work_done, work_to_do);
2076  /* Hit quota or no more to clean */
2077  if (-EAGAIN == err || -ENODATA == err)
2078  break;
2079  }
2080 
2081 
2082  /* On EAGAIN we hit the quota, so there is more work to do; restart
2083  * once cleanup is complete.
2084  * Otherwise, if we are already in the RNR state, take note: this
2085  * ensures that the state machine never allows a start with a
2086  * partially cleaned list, avoiding a race between hardware
2087  * and rx_to_clean when in NAPI mode */
2088  if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
2089  restart_required = 1;
2090 
2091  old_before_last_rx = nic->rx_to_use->prev->prev;
2092  old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
2093 
2094  /* Alloc new skbs to refill list */
2095  for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
2096  if (unlikely(e100_rx_alloc_skb(nic, rx)))
2097  break; /* Better luck next time (see watchdog) */
2098  }
2099 
2100  new_before_last_rx = nic->rx_to_use->prev->prev;
2101  if (new_before_last_rx != old_before_last_rx) {
2102  /* Set the el-bit on the buffer that is before the last buffer.
2103  * This lets us update the next pointer on the last buffer
2104  * without worrying about hardware touching it.
2105  * We set the size to 0 to prevent hardware from touching this
2106  * buffer.
2107  * When the hardware hits the before last buffer with el-bit
2108  * and size of 0, it will RNR interrupt, the RUS will go into
2109  * the No Resources state. It will not complete nor write to
2110  * this buffer. */
2111  new_before_last_rfd =
2112  (struct rfd *)new_before_last_rx->skb->data;
2113  new_before_last_rfd->size = 0;
2114  new_before_last_rfd->command |= cpu_to_le16(cb_el);
2115  pci_dma_sync_single_for_device(nic->pdev,
2116  new_before_last_rx->dma_addr, sizeof(struct rfd),
2117  PCI_DMA_BIDIRECTIONAL);
2118 
2119  /* Now that we have a new stopping point, we can clear the old
2120  * stopping point. We must sync twice to get the proper
2121  * ordering on the hardware side of things. */
2122  old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
2123  pci_dma_sync_single_for_device(nic->pdev,
2124  old_before_last_rx->dma_addr, sizeof(struct rfd),
2125  PCI_DMA_BIDIRECTIONAL);
2126  old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
2127  + ETH_FCS_LEN);
2128  pci_dma_sync_single_for_device(nic->pdev,
2129  old_before_last_rx->dma_addr, sizeof(struct rfd),
2130  PCI_DMA_BIDIRECTIONAL);
2131  }
2132 
2133  if (restart_required) {
2134  /* ack the RNR interrupt before restarting the receiver */
2135  iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
2136  e100_start_receiver(nic, nic->rx_to_clean);
2137  if (work_done)
2138  (*work_done)++;
2139  }
2140 }
2141 
2142 static void e100_rx_clean_list(struct nic *nic)
2143 {
2144  struct rx *rx;
2145  unsigned int i, count = nic->params.rfds.count;
2146 
2147  nic->ru_running = RU_UNINITIALIZED;
2148 
2149  if (nic->rxs) {
2150  for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2151  if (rx->skb) {
2152  pci_unmap_single(nic->pdev, rx->dma_addr,
2153  RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2154  dev_kfree_skb(rx->skb);
2155  }
2156  }
2157  kfree(nic->rxs);
2158  nic->rxs = NULL;
2159  }
2160 
2161  nic->rx_to_use = nic->rx_to_clean = NULL;
2162 }
2163 
2164 static int e100_rx_alloc_list(struct nic *nic)
2165 {
2166  struct rx *rx;
2167  unsigned int i, count = nic->params.rfds.count;
2168  struct rfd *before_last;
2169 
2170  nic->rx_to_use = nic->rx_to_clean = NULL;
2171  nic->ru_running = RU_UNINITIALIZED;
2172 
2173  if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
2174  return -ENOMEM;
2175 
2176  for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2177  rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
2178  rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
2179  if (e100_rx_alloc_skb(nic, rx)) {
2180  e100_rx_clean_list(nic);
2181  return -ENOMEM;
2182  }
2183  }
2184  /* Set the el-bit on the buffer that is before the last buffer.
2185  * This lets us update the next pointer on the last buffer without
2186  * worrying about hardware touching it.
2187  * We set the size to 0 to prevent hardware from touching this buffer.
2188  * When the hardware hits the before last buffer with el-bit and size
2189  * of 0, it will RNR interrupt, the RU will go into the No Resources
2190  * state. It will not complete nor write to this buffer. */
2191  rx = nic->rxs->prev->prev;
2192  before_last = (struct rfd *)rx->skb->data;
2193  before_last->command |= cpu_to_le16(cb_el);
2194  before_last->size = 0;
2195  pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
2196  sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
2197 
2198  nic->rx_to_use = nic->rx_to_clean = nic->rxs;
2199  nic->ru_running = RU_SUSPENDED;
2200 
2201  return 0;
2202 }
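
Both e100_rx_alloc_list and e100_rx_clean locate the before-last buffer as prev->prev of the list head, then mark it (el bit set, size 0) so hardware stops one full buffer before the software-owned tail. A small userspace model of the circular list showing which index prev->prev selects:

	#include <stdio.h>

	struct node { struct node *next, *prev; int el; };

	int main(void)
	{
		enum { N = 5 };
		struct node ring[N];
		int i;

		for (i = 0; i < N; i++) {
			ring[i].next = &ring[(i + 1) % N];
			ring[i].prev = &ring[(i + N - 1) % N];
			ring[i].el = 0;
		}
		/* prev->prev of the head is the before-last element, N - 2. */
		struct node *before_last = ring[0].prev->prev;
		before_last->el = 1;
		printf("el set on index %ld\n", (long)(before_last - ring));
		return 0;
	}

With N = 5 this marks index 3, leaving exactly one unmarked buffer between the hardware stopping point and the head.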
2203 
2204 static irqreturn_t e100_intr(int irq, void *dev_id)
2205 {
2206  struct net_device *netdev = dev_id;
2207  struct nic *nic = netdev_priv(netdev);
2208  u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2209 
2210  netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2211  "stat_ack = 0x%02X\n", stat_ack);
2212 
2213  if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
2214  stat_ack == stat_ack_not_present) /* Hardware is ejected */
2215  return IRQ_NONE;
2216 
2217  /* Ack interrupt(s) */
2218  iowrite8(stat_ack, &nic->csr->scb.stat_ack);
2219 
2220  /* We hit Receive No Resource (RNR); restart RU after cleaning */
2221  if (stat_ack & stat_ack_rnr)
2222  nic->ru_running = RU_SUSPENDED;
2223 
2224  if (likely(napi_schedule_prep(&nic->napi))) {
2225  e100_disable_irq(nic);
2226  __napi_schedule(&nic->napi);
2227  }
2228 
2229  return IRQ_HANDLED;
2230 }
2231 
2232 static int e100_poll(struct napi_struct *napi, int budget)
2233 {
2234  struct nic *nic = container_of(napi, struct nic, napi);
2235  unsigned int work_done = 0;
2236 
2237  e100_rx_clean(nic, &work_done, budget);
2238  e100_tx_clean(nic);
2239 
2240  /* If budget not fully consumed, exit the polling mode */
2241  if (work_done < budget) {
2242  napi_complete(napi);
2243  e100_enable_irq(nic);
2244  }
2245 
2246  return work_done;
2247 }
2248 
2249 #ifdef CONFIG_NET_POLL_CONTROLLER
2250 static void e100_netpoll(struct net_device *netdev)
2251 {
2252  struct nic *nic = netdev_priv(netdev);
2253 
2254  e100_disable_irq(nic);
2255  e100_intr(nic->pdev->irq, netdev);
2256  e100_tx_clean(nic);
2257  e100_enable_irq(nic);
2258 }
2259 #endif
2260 
2261 static int e100_set_mac_address(struct net_device *netdev, void *p)
2262 {
2263  struct nic *nic = netdev_priv(netdev);
2264  struct sockaddr *addr = p;
2265 
2266  if (!is_valid_ether_addr(addr->sa_data))
2267  return -EADDRNOTAVAIL;
2268 
2269  memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2270  e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2271 
2272  return 0;
2273 }
2274 
2275 static int e100_change_mtu(struct net_device *netdev, int new_mtu)
2276 {
2277  if (new_mtu < ETH_ZLEN || new_mtu > ETH_DATA_LEN)
2278  return -EINVAL;
2279  netdev->mtu = new_mtu;
2280  return 0;
2281 }
2282 
2283 static int e100_asf(struct nic *nic)
2284 {
2285  /* ASF can be enabled from eeprom */
2286  return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2287  (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
2288  !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
2289  ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
2290 }
2291 
2292 static int e100_up(struct nic *nic)
2293 {
2294  int err;
2295 
2296  if ((err = e100_rx_alloc_list(nic)))
2297  return err;
2298  if ((err = e100_alloc_cbs(nic)))
2299  goto err_rx_clean_list;
2300  if ((err = e100_hw_init(nic)))
2301  goto err_clean_cbs;
2302  e100_set_multicast_list(nic->netdev);
2303  e100_start_receiver(nic, NULL);
2304  mod_timer(&nic->watchdog, jiffies);
2305  if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
2306  nic->netdev->name, nic->netdev)))
2307  goto err_no_irq;
2308  netif_wake_queue(nic->netdev);
2309  napi_enable(&nic->napi);
2310  /* enable ints _after_ enabling poll, preventing a race with the
2311  * disable-ints-then-schedule path */
2312  e100_enable_irq(nic);
2313  return 0;
2314 
2315 err_no_irq:
2316  del_timer_sync(&nic->watchdog);
2317 err_clean_cbs:
2318  e100_clean_cbs(nic);
2319 err_rx_clean_list:
2320  e100_rx_clean_list(nic);
2321  return err;
2322 }
2323 
2324 static void e100_down(struct nic *nic)
2325 {
2326  /* wait here for poll to complete */
2327  napi_disable(&nic->napi);
2328  netif_stop_queue(nic->netdev);
2329  e100_hw_reset(nic);
2330  free_irq(nic->pdev->irq, nic->netdev);
2331  del_timer_sync(&nic->watchdog);
2332  netif_carrier_off(nic->netdev);
2333  e100_clean_cbs(nic);
2334  e100_rx_clean_list(nic);
2335 }
2336 
2337 static void e100_tx_timeout(struct net_device *netdev)
2338 {
2339  struct nic *nic = netdev_priv(netdev);
2340 
2341  /* Reset outside of interrupt context, to avoid request_irq
2342  * in interrupt context */
2343  schedule_work(&nic->tx_timeout_task);
2344 }
2345 
2346 static void e100_tx_timeout_task(struct work_struct *work)
2347 {
2348  struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2349  struct net_device *netdev = nic->netdev;
2350 
2351  netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2352  "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2353 
2354  rtnl_lock();
2355  if (netif_running(netdev)) {
2356  e100_down(netdev_priv(netdev));
2357  e100_up(netdev_priv(netdev));
2358  }
2359  rtnl_unlock();
2360 }
2361 
2362 static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
2363 {
2364  int err;
2365  struct sk_buff *skb;
2366 
2367  /* Use driver resources to perform internal MAC or PHY
2368  * loopback test. A single packet is prepared and transmitted
2369  * in loopback mode, and the test passes if the received
2370  * packet compares byte-for-byte to the transmitted packet. */
2371 
2372  if ((err = e100_rx_alloc_list(nic)))
2373  return err;
2374  if ((err = e100_alloc_cbs(nic)))
2375  goto err_clean_rx;
2376 
2377  /* ICH PHY loopback is broken so do MAC loopback instead */
2378  if (nic->flags & ich && loopback_mode == lb_phy)
2379  loopback_mode = lb_mac;
2380 
2381  nic->loopback = loopback_mode;
2382  if ((err = e100_hw_init(nic)))
2383  goto err_loopback_none;
2384 
2385  if (loopback_mode == lb_phy)
2386  mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2387  BMCR_LOOPBACK);
2388 
2389  e100_start_receiver(nic, NULL);
2390 
2391  if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2392  err = -ENOMEM;
2393  goto err_loopback_none;
2394  }
2395  skb_put(skb, ETH_DATA_LEN);
2396  memset(skb->data, 0xFF, ETH_DATA_LEN);
2397  e100_xmit_frame(skb, nic->netdev);
2398 
2399  msleep(10);
2400 
2401  pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
2402  RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
2403 
2404  if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
2405  skb->data, ETH_DATA_LEN))
2406  err = -EAGAIN;
2407 
2408 err_loopback_none:
2409  mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2410  nic->loopback = lb_none;
2411  e100_clean_cbs(nic);
2412  e100_hw_reset(nic);
2413 err_clean_rx:
2414  e100_rx_clean_list(nic);
2415  return err;
2416 }
2417 
2418 #define MII_LED_CONTROL 0x1B
2419 #define E100_82552_LED_OVERRIDE 0x19
2420 #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
2421 #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
2422 
2423 static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2424 {
2425  struct nic *nic = netdev_priv(netdev);
2426  return mii_ethtool_gset(&nic->mii, cmd);
2427 }
2428 
2429 static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
2430 {
2431  struct nic *nic = netdev_priv(netdev);
2432  int err;
2433 
2434  mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2435  err = mii_ethtool_sset(&nic->mii, cmd);
2436  e100_exec_cb(nic, NULL, e100_configure);
2437 
2438  return err;
2439 }
2440 
2441 static void e100_get_drvinfo(struct net_device *netdev,
2442  struct ethtool_drvinfo *info)
2443 {
2444  struct nic *nic = netdev_priv(netdev);
2445  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2446  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2447  strlcpy(info->bus_info, pci_name(nic->pdev),
2448  sizeof(info->bus_info));
2449 }
2450 
2451 #define E100_PHY_REGS 0x1C
2452 static int e100_get_regs_len(struct net_device *netdev)
2453 {
2454  struct nic *nic = netdev_priv(netdev);
2455  return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
2456 }
2457 
2458 static void e100_get_regs(struct net_device *netdev,
2459  struct ethtool_regs *regs, void *p)
2460 {
2461  struct nic *nic = netdev_priv(netdev);
2462  u32 *buff = p;
2463  int i;
2464 
2465  regs->version = (1 << 24) | nic->pdev->revision;
2466  buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
2467  ioread8(&nic->csr->scb.cmd_lo) << 16 |
2468  ioread16(&nic->csr->scb.status);
2469  for (i = E100_PHY_REGS; i >= 0; i--)
2470  buff[1 + E100_PHY_REGS - i] =
2471  mdio_read(netdev, nic->mii.phy_id, i);
2472  memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
2473  e100_exec_cb(nic, NULL, e100_dump);
2474  msleep(10);
2475  memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
2476  sizeof(nic->mem->dump_buf));
2477 }
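
buff[0] in e100_get_regs packs three CSR fields into one 32-bit word: cmd_hi in bits 31:24, cmd_lo in bits 23:16, and the 16-bit status in bits 15:0. A standalone sketch of the packing, with made-up register values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Made-up register values for illustration. */
		uint8_t cmd_hi = 0x12, cmd_lo = 0x34;
		uint16_t status = 0x5678;
		uint32_t word = (uint32_t)cmd_hi << 24 |
				(uint32_t)cmd_lo << 16 | status;

		printf("buff[0] = 0x%08X\n", (unsigned)word);	/* 0x12345678 */
		return 0;
	}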
2478 
2479 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2480 {
2481  struct nic *nic = netdev_priv(netdev);
2482  wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2483  wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2484 }
2485 
2486 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2487 {
2488  struct nic *nic = netdev_priv(netdev);
2489 
2490  if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2491  !device_can_wakeup(&nic->pdev->dev))
2492  return -EOPNOTSUPP;
2493 
2494  if (wol->wolopts)
2495  nic->flags |= wol_magic;
2496  else
2497  nic->flags &= ~wol_magic;
2498 
2499  device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2500 
2501  e100_exec_cb(nic, NULL, e100_configure);
2502 
2503  return 0;
2504 }
2505 
2506 static u32 e100_get_msglevel(struct net_device *netdev)
2507 {
2508  struct nic *nic = netdev_priv(netdev);
2509  return nic->msg_enable;
2510 }
2511 
2512 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2513 {
2514  struct nic *nic = netdev_priv(netdev);
2515  nic->msg_enable = value;
2516 }
2517 
2518 static int e100_nway_reset(struct net_device *netdev)
2519 {
2520  struct nic *nic = netdev_priv(netdev);
2521  return mii_nway_restart(&nic->mii);
2522 }
2523 
2524 static u32 e100_get_link(struct net_device *netdev)
2525 {
2526  struct nic *nic = netdev_priv(netdev);
2527  return mii_link_ok(&nic->mii);
2528 }
2529 
2530 static int e100_get_eeprom_len(struct net_device *netdev)
2531 {
2532  struct nic *nic = netdev_priv(netdev);
2533  return nic->eeprom_wc << 1;
2534 }
2535 
2536 #define E100_EEPROM_MAGIC 0x1234
2537 static int e100_get_eeprom(struct net_device *netdev,
2538  struct ethtool_eeprom *eeprom, u8 *bytes)
2539 {
2540  struct nic *nic = netdev_priv(netdev);
2541 
2542  eeprom->magic = E100_EEPROM_MAGIC;
2543  memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2544 
2545  return 0;
2546 }
2547 
2548 static int e100_set_eeprom(struct net_device *netdev,
2549  struct ethtool_eeprom *eeprom, u8 *bytes)
2550 {
2551  struct nic *nic = netdev_priv(netdev);
2552 
2553  if (eeprom->magic != E100_EEPROM_MAGIC)
2554  return -EINVAL;
2555 
2556  memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);
2557 
2558  return e100_eeprom_save(nic, eeprom->offset >> 1,
2559  (eeprom->len >> 1) + 1);
2560 }
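
The EEPROM is addressed in 16-bit words (e100_get_eeprom_len returns eeprom_wc << 1 to convert the word count to ethtool's byte length), so e100_set_eeprom must convert the byte-based offset and length: offset >> 1 selects the first word touched, and (len >> 1) + 1 rounds up so a write straddling word boundaries still covers the final partial word. A sketch with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative request: bytes 3..7, which touch words 1..3. */
		unsigned int offset = 3, len = 5;
		unsigned int first_word = offset >> 1;	/* word 1 (bytes 2-3) */
		unsigned int word_count = (len >> 1) + 1;	/* 3 words, bytes 2-7 */

		printf("save words %u..%u\n",
		       first_word, first_word + word_count - 1);
		return 0;
	}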
2561 
2562 static void e100_get_ringparam(struct net_device *netdev,
2563  struct ethtool_ringparam *ring)
2564 {
2565  struct nic *nic = netdev_priv(netdev);
2566  struct param_range *rfds = &nic->params.rfds;
2567  struct param_range *cbs = &nic->params.cbs;
2568 
2569  ring->rx_max_pending = rfds->max;
2570  ring->tx_max_pending = cbs->max;
2571  ring->rx_pending = rfds->count;
2572  ring->tx_pending = cbs->count;
2573 }
2574 
2575 static int e100_set_ringparam(struct net_device *netdev,
2576  struct ethtool_ringparam *ring)
2577 {
2578  struct nic *nic = netdev_priv(netdev);
2579  struct param_range *rfds = &nic->params.rfds;
2580  struct param_range *cbs = &nic->params.cbs;
2581 
2582  if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2583  return -EINVAL;
2584 
2585  if (netif_running(netdev))
2586  e100_down(nic);
2587  rfds->count = max(ring->rx_pending, rfds->min);
2588  rfds->count = min(rfds->count, rfds->max);
2589  cbs->count = max(ring->tx_pending, cbs->min);
2590  cbs->count = min(cbs->count, cbs->max);
2591  netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2592  rfds->count, cbs->count);
2593  if (netif_running(netdev))
2594  e100_up(nic);
2595 
2596  return 0;
2597 }
2598 
2599 static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
2600  "Link test (on/offline)",
2601  "Eeprom test (on/offline)",
2602  "Self test (offline)",
2603  "Mac loopback (offline)",
2604  "Phy loopback (offline)",
2605 };
2606 #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test)
2607 
2608 static void e100_diag_test(struct net_device *netdev,
2609  struct ethtool_test *test, u64 *data)
2610 {
2611  struct ethtool_cmd cmd;
2612  struct nic *nic = netdev_priv(netdev);
2613  int i, err;
2614 
2615  memset(data, 0, E100_TEST_LEN * sizeof(u64));
2616  data[0] = !mii_link_ok(&nic->mii);
2617  data[1] = e100_eeprom_load(nic);
2618  if (test->flags & ETH_TEST_FL_OFFLINE) {
2619 
2620  /* save speed, duplex & autoneg settings */
2621  err = mii_ethtool_gset(&nic->mii, &cmd);
2622 
2623  if (netif_running(netdev))
2624  e100_down(nic);
2625  data[2] = e100_self_test(nic);
2626  data[3] = e100_loopback_test(nic, lb_mac);
2627  data[4] = e100_loopback_test(nic, lb_phy);
2628 
2629  /* restore speed, duplex & autoneg settings */
2630  err = mii_ethtool_sset(&nic->mii, &cmd);
2631 
2632  if (netif_running(netdev))
2633  e100_up(nic);
2634  }
2635  for (i = 0; i < E100_TEST_LEN; i++)
2636  test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
2637 
2638  msleep_interruptible(4 * 1000);
2639 }
2640 
2641 static int e100_set_phys_id(struct net_device *netdev,
2642  enum ethtool_phys_id_state state)
2643 {
2644  struct nic *nic = netdev_priv(netdev);
2645  enum led_state {
2646  led_on = 0x01,
2647  led_off = 0x04,
2648  led_on_559 = 0x05,
2649  led_on_557 = 0x07,
2650  };
2651  u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
2652  MII_LED_CONTROL;
2653  u16 leds = 0;
2654 
2655  switch (state) {
2656  case ETHTOOL_ID_ACTIVE:
2657  return 2;
2658 
2659  case ETHTOOL_ID_ON:
2660  leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
2661  (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
2662  break;
2663 
2664  case ETHTOOL_ID_OFF:
2665  leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
2666  break;
2667 
2668  case ETHTOOL_ID_INACTIVE:
2669  break;
2670  }
2671 
2672  mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2673  return 0;
2674 }
2675 
2676 static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
2677  "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
2678  "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
2679  "rx_length_errors", "rx_over_errors", "rx_crc_errors",
2680  "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
2681  "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
2682  "tx_heartbeat_errors", "tx_window_errors",
2683  /* device-specific stats */
2684  "tx_deferred", "tx_single_collisions", "tx_multi_collisions",
2685  "tx_flow_control_pause", "rx_flow_control_pause",
2686  "rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
2687  "rx_short_frame_errors", "rx_over_length_errors",
2688 };
2689 #define E100_NET_STATS_LEN 21
2690 #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats)
2691 
2692 static int e100_get_sset_count(struct net_device *netdev, int sset)
2693 {
2694  switch (sset) {
2695  case ETH_SS_TEST:
2696  return E100_TEST_LEN;
2697  case ETH_SS_STATS:
2698  return E100_STATS_LEN;
2699  default:
2700  return -EOPNOTSUPP;
2701  }
2702 }
2703 
2704 static void e100_get_ethtool_stats(struct net_device *netdev,
2705  struct ethtool_stats *stats, u64 *data)
2706 {
2707  struct nic *nic = netdev_priv(netdev);
2708  int i;
2709 
2710  for (i = 0; i < E100_NET_STATS_LEN; i++)
2711  data[i] = ((unsigned long *)&netdev->stats)[i];
2712 
2713  data[i++] = nic->tx_deferred;
2714  data[i++] = nic->tx_single_collisions;
2715  data[i++] = nic->tx_multiple_collisions;
2716  data[i++] = nic->tx_fc_pause;
2717  data[i++] = nic->rx_fc_pause;
2718  data[i++] = nic->rx_fc_unsupported;
2719  data[i++] = nic->tx_tco_frames;
2720  data[i++] = nic->rx_tco_frames;
2721  data[i++] = nic->rx_short_frame_errors;
2722  data[i++] = nic->rx_over_length_errors;
2723 }
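
e100_get_ethtool_stats reads struct net_device_stats as a flat array of unsigned long; this is safe only because every field in that struct has that type and the first E100_NET_STATS_LEN strings above follow the field order exactly. A reduced userspace model of the same trick (struct demo_stats is hypothetical):

	#include <stdio.h>

	/* Hypothetical, reduced model of struct net_device_stats: every
	 * field is an unsigned long, so the struct reads as an array. */
	struct demo_stats {
		unsigned long rx_packets;
		unsigned long tx_packets;
		unsigned long rx_bytes;
	};

	int main(void)
	{
		struct demo_stats s = { 10, 20, 30000 };
		const char *names[] = { "rx_packets", "tx_packets", "rx_bytes" };
		unsigned int i;

		for (i = 0; i < 3; i++)
			printf("%s = %lu\n", names[i], ((unsigned long *)&s)[i]);
		return 0;
	}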
2724 
2725 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2726 {
2727  switch (stringset) {
2728  case ETH_SS_TEST:
2729  memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test));
2730  break;
2731  case ETH_SS_STATS:
2732  memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats));
2733  break;
2734  }
2735 }
2736 
2737 static const struct ethtool_ops e100_ethtool_ops = {
2738  .get_settings = e100_get_settings,
2739  .set_settings = e100_set_settings,
2740  .get_drvinfo = e100_get_drvinfo,
2741  .get_regs_len = e100_get_regs_len,
2742  .get_regs = e100_get_regs,
2743  .get_wol = e100_get_wol,
2744  .set_wol = e100_set_wol,
2745  .get_msglevel = e100_get_msglevel,
2746  .set_msglevel = e100_set_msglevel,
2747  .nway_reset = e100_nway_reset,
2748  .get_link = e100_get_link,
2749  .get_eeprom_len = e100_get_eeprom_len,
2750  .get_eeprom = e100_get_eeprom,
2751  .set_eeprom = e100_set_eeprom,
2752  .get_ringparam = e100_get_ringparam,
2753  .set_ringparam = e100_set_ringparam,
2754  .self_test = e100_diag_test,
2755  .get_strings = e100_get_strings,
2756  .set_phys_id = e100_set_phys_id,
2757  .get_ethtool_stats = e100_get_ethtool_stats,
2758  .get_sset_count = e100_get_sset_count,
2759  .get_ts_info = ethtool_op_get_ts_info,
2760 };
2761 
2762 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2763 {
2764  struct nic *nic = netdev_priv(netdev);
2765 
2766  return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2767 }
2768 
2769 static int e100_alloc(struct nic *nic)
2770 {
2771  nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
2772  &nic->dma_addr);
2773  return nic->mem ? 0 : -ENOMEM;
2774 }
2775 
2776 static void e100_free(struct nic *nic)
2777 {
2778  if (nic->mem) {
2779  pci_free_consistent(nic->pdev, sizeof(struct mem),
2780  nic->mem, nic->dma_addr);
2781  nic->mem = NULL;
2782  }
2783 }
2784 
2785 static int e100_open(struct net_device *netdev)
2786 {
2787  struct nic *nic = netdev_priv(netdev);
2788  int err = 0;
2789 
2790  netif_carrier_off(netdev);
2791  if ((err = e100_up(nic)))
2792  netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2793  return err;
2794 }
2795 
2796 static int e100_close(struct net_device *netdev)
2797 {
2798  e100_down(netdev_priv(netdev));
2799  return 0;
2800 }
2801 
2802 static int e100_set_features(struct net_device *netdev,
2803  netdev_features_t features)
2804 {
2805  struct nic *nic = netdev_priv(netdev);
2806  netdev_features_t changed = features ^ netdev->features;
2807 
2808  if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
2809  return 0;
2810 
2811  netdev->features = features;
2812  e100_exec_cb(nic, NULL, e100_configure);
2813  return 0;
2814 }
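
The XOR in e100_set_features leaves set only the bits that actually toggled, letting the driver skip a reconfigure when neither RXFCS nor RXALL changed. A minimal illustration with made-up feature masks:

	#include <stdio.h>

	int main(void)
	{
		/* Made-up masks: bit 0 was cleared and bit 1 was set. */
		unsigned int old_features = 0x5, new_features = 0x6;
		unsigned int changed = old_features ^ new_features;

		printf("changed bits = 0x%x\n", changed);	/* 0x3: both toggles */
		return 0;
	}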
2815 
2816 static const struct net_device_ops e100_netdev_ops = {
2817  .ndo_open = e100_open,
2818  .ndo_stop = e100_close,
2819  .ndo_start_xmit = e100_xmit_frame,
2820  .ndo_validate_addr = eth_validate_addr,
2821  .ndo_set_rx_mode = e100_set_multicast_list,
2822  .ndo_set_mac_address = e100_set_mac_address,
2823  .ndo_change_mtu = e100_change_mtu,
2824  .ndo_do_ioctl = e100_do_ioctl,
2825  .ndo_tx_timeout = e100_tx_timeout,
2826 #ifdef CONFIG_NET_POLL_CONTROLLER
2827  .ndo_poll_controller = e100_netpoll,
2828 #endif
2829  .ndo_set_features = e100_set_features,
2830 };
2831 
2832 static int __devinit e100_probe(struct pci_dev *pdev,
2833  const struct pci_device_id *ent)
2834 {
2835  struct net_device *netdev;
2836  struct nic *nic;
2837  int err;
2838 
2839  if (!(netdev = alloc_etherdev(sizeof(struct nic))))
2840  return -ENOMEM;
2841 
2842  netdev->hw_features |= NETIF_F_RXFCS;
2843  netdev->priv_flags |= IFF_SUPP_NOFCS;
2844  netdev->hw_features |= NETIF_F_RXALL;
2845 
2846  netdev->netdev_ops = &e100_netdev_ops;
2847  SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2848  netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2849  strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2850 
2851  nic = netdev_priv(netdev);
2852  netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
2853  nic->netdev = netdev;
2854  nic->pdev = pdev;
2855  nic->msg_enable = (1 << debug) - 1;
2856  nic->mdio_ctrl = mdio_ctrl_hw;
2857  pci_set_drvdata(pdev, netdev);
2858 
2859  if ((err = pci_enable_device(pdev))) {
2860  netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2861  goto err_out_free_dev;
2862  }
2863 
2864  if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2865  netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2866  err = -ENODEV;
2867  goto err_out_disable_pdev;
2868  }
2869 
2870  if ((err = pci_request_regions(pdev, DRV_NAME))) {
2871  netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2872  goto err_out_disable_pdev;
2873  }
2874 
2875  if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2876  netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2877  goto err_out_free_res;
2878  }
2879 
2880  SET_NETDEV_DEV(netdev, &pdev->dev);
2881 
2882  if (use_io)
2883  netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
2884 
2885  nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2886  if (!nic->csr) {
2887  netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2888  err = -ENOMEM;
2889  goto err_out_free_res;
2890  }
2891 
2892  if (ent->driver_data)
2893  nic->flags |= ich;
2894  else
2895  nic->flags &= ~ich;
2896 
2897  e100_get_defaults(nic);
2898 
2899  /* D100 MAC doesn't allow rx of vlan packets with normal MTU */
2900  if (nic->mac < mac_82558_D101_A4)
2901  netdev->features |= NETIF_F_VLAN_CHALLENGED;
2902 
2903  /* locks must be initialized before calling hw_reset */
2904  spin_lock_init(&nic->cb_lock);
2905  spin_lock_init(&nic->cmd_lock);
2906  spin_lock_init(&nic->mdio_lock);
2907 
2908  /* Reset the device before pci_set_master() in case device is in some
2909  * funky state and has an interrupt pending - hint: we don't have the
2910  * interrupt handler registered yet. */
2911  e100_hw_reset(nic);
2912 
2913  pci_set_master(pdev);
2914 
2915  init_timer(&nic->watchdog);
2916  nic->watchdog.function = e100_watchdog;
2917  nic->watchdog.data = (unsigned long)nic;
2918 
2919  INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2920 
2921  if ((err = e100_alloc(nic))) {
2922  netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2923  goto err_out_iounmap;
2924  }
2925 
2926  if ((err = e100_eeprom_load(nic)))
2927  goto err_out_free;
2928 
2929  e100_phy_init(nic);
2930 
2931  memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2932  memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2933  if (!is_valid_ether_addr(netdev->perm_addr)) {
2934  if (!eeprom_bad_csum_allow) {
2935  netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2936  err = -EAGAIN;
2937  goto err_out_free;
2938  } else {
2939  netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2940  }
2941  }
2942 
2943  /* Wol magic packet can be enabled from eeprom */
2944  if ((nic->mac >= mac_82558_D101_A4) &&
2945  (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2946  nic->flags |= wol_magic;
2947  device_set_wakeup_enable(&pdev->dev, true);
2948  }
2949 
2950  /* ack any pending wake events, disable PME */
2951  pci_pme_active(pdev, false);
2952 
2953  strcpy(netdev->name, "eth%d");
2954  if ((err = register_netdev(netdev))) {
2955  netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2956  goto err_out_free;
2957  }
2958  nic->cbs_pool = pci_pool_create(netdev->name,
2959  nic->pdev,
2960  nic->params.cbs.max * sizeof(struct cb),
2961  sizeof(u32),
2962  0);
2963  netif_info(nic, probe, nic->netdev,
2964  "addr 0x%llx, irq %d, MAC addr %pM\n",
2965  (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2966  pdev->irq, netdev->dev_addr);
2967 
2968  return 0;
2969 
2970 err_out_free:
2971  e100_free(nic);
2972 err_out_iounmap:
2973  pci_iounmap(pdev, nic->csr);
2974 err_out_free_res:
2975  pci_release_regions(pdev);
2976 err_out_disable_pdev:
2977  pci_disable_device(pdev);
2978 err_out_free_dev:
2979  pci_set_drvdata(pdev, NULL);
2980  free_netdev(netdev);
2981  return err;
2982 }
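
e100_probe follows the kernel's goto-unwind idiom: resources are released in reverse order of acquisition, and each failure jumps to the label that frees everything acquired so far. A self-contained userspace model of the same structure, where the two allocations are hypothetical stand-ins for probe steps (enable device, request regions, map registers, ...):

	#include <stdio.h>
	#include <stdlib.h>

	static int demo_probe(void)
	{
		void *a, *b;

		if (!(a = malloc(16)))
			goto err_out;
		if (!(b = malloc(16)))
			goto err_free_a;

		printf("probe succeeded\n");
		free(b);
		free(a);
		return 0;

	err_free_a:
		free(a);	/* release in reverse order of acquisition */
	err_out:
		return -1;
	}

	int main(void)
	{
		return demo_probe() ? 1 : 0;
	}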
2983 
2984 static void __devexit e100_remove(struct pci_dev *pdev)
2985 {
2986  struct net_device *netdev = pci_get_drvdata(pdev);
2987 
2988  if (netdev) {
2989  struct nic *nic = netdev_priv(netdev);
2990  unregister_netdev(netdev);
2991  e100_free(nic);
2992  pci_iounmap(pdev, nic->csr);
2993  pci_pool_destroy(nic->cbs_pool);
2994  free_netdev(netdev);
2995  pci_release_regions(pdev);
2996  pci_disable_device(pdev);
2997  pci_set_drvdata(pdev, NULL);
2998  }
2999 }
3000 
3001 #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */
3002 #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */
3003 #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */
3004 static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
3005 {
3006  struct net_device *netdev = pci_get_drvdata(pdev);
3007  struct nic *nic = netdev_priv(netdev);
3008 
3009  if (netif_running(netdev))
3010  e100_down(nic);
3011  netif_device_detach(netdev);
3012 
3013  pci_save_state(pdev);
3014 
3015  if ((nic->flags & wol_magic) | e100_asf(nic)) {
3016  /* enable reverse auto-negotiation */
3017  if (nic->phy == phy_82552_v) {
3018  u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3019  E100_82552_SMARTSPEED);
3020 
3021  mdio_write(netdev, nic->mii.phy_id,
3022  E100_82552_SMARTSPEED, smartspeed |
3023  E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
3024  }
3025  *enable_wake = true;
3026  } else {
3027  *enable_wake = false;
3028  }
3029 
3030  pci_disable_device(pdev);
3031 }
3032 
3033 static int __e100_power_off(struct pci_dev *pdev, bool wake)
3034 {
3035  if (wake)
3036  return pci_prepare_to_sleep(pdev);
3037 
3038  pci_wake_from_d3(pdev, false);
3039  pci_set_power_state(pdev, PCI_D3hot);
3040 
3041  return 0;
3042 }
3043 
3044 #ifdef CONFIG_PM
3045 static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
3046 {
3047  bool wake;
3048  __e100_shutdown(pdev, &wake);
3049  return __e100_power_off(pdev, wake);
3050 }
3051 
3052 static int e100_resume(struct pci_dev *pdev)
3053 {
3054  struct net_device *netdev = pci_get_drvdata(pdev);
3055  struct nic *nic = netdev_priv(netdev);
3056 
3057  pci_set_power_state(pdev, PCI_D0);
3058  pci_restore_state(pdev);
3059  /* ack any pending wake events, disable PME */
3060  pci_enable_wake(pdev, 0, 0);
3061 
3062  /* disable reverse auto-negotiation */
3063  if (nic->phy == phy_82552_v) {
3064  u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
3065  E100_82552_SMARTSPEED);
3066 
3067  mdio_write(netdev, nic->mii.phy_id,
3068  E100_82552_SMARTSPEED,
3069  smartspeed & ~(E100_82552_REV_ANEG));
3070  }
3071 
3072  netif_device_attach(netdev);
3073  if (netif_running(netdev))
3074  e100_up(nic);
3075 
3076  return 0;
3077 }
3078 #endif /* CONFIG_PM */
3079 
3080 static void e100_shutdown(struct pci_dev *pdev)
3081 {
3082  bool wake;
3083  __e100_shutdown(pdev, &wake);
3084  if (system_state == SYSTEM_POWER_OFF)
3085  __e100_power_off(pdev, wake);
3086 }
3087 
3088 /* ------------------ PCI Error Recovery infrastructure -------------- */
3089 /**
3090  * e100_io_error_detected - called when PCI error is detected.
3091  * @pdev: Pointer to PCI device
3092  * @state: The current pci connection state
3093  */
3094 static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
3095 {
3096  struct net_device *netdev = pci_get_drvdata(pdev);
3097  struct nic *nic = netdev_priv(netdev);
3098 
3099  netif_device_detach(netdev);
3100 
3101  if (state == pci_channel_io_perm_failure)
3102  return PCI_ERS_RESULT_DISCONNECT;
3103 
3104  if (netif_running(netdev))
3105  e100_down(nic);
3106  pci_disable_device(pdev);
3107 
3108  /* Request a slot reset. */
3109  return PCI_ERS_RESULT_NEED_RESET;
3110 }
3111 
3112 /**
3113  * e100_io_slot_reset - called after the pci bus has been reset.
3114  * @pdev: Pointer to PCI device
3115  *
3116  * Restart the card from scratch, as if from a cold-boot.
3117  */
3118 static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3119 {
3120  struct net_device *netdev = pci_get_drvdata(pdev);
3121  struct nic *nic = netdev_priv(netdev);
3122 
3123  if (pci_enable_device(pdev)) {
3124  pr_err("Cannot re-enable PCI device after reset\n");
3125  return PCI_ERS_RESULT_DISCONNECT;
3126  }
3127  pci_set_master(pdev);
3128 
3129  /* Only one device per card can do a reset */
3130  if (0 != PCI_FUNC(pdev->devfn))
3131  return PCI_ERS_RESULT_RECOVERED;
3132  e100_hw_reset(nic);
3133  e100_phy_init(nic);
3134 
3135  return PCI_ERS_RESULT_RECOVERED;
3136 }
3137 
3138 /**
3139  * e100_io_resume - resume normal operations
3140  * @pdev: Pointer to PCI device
3141  *
3142  * Resume normal operations after an error recovery
3143  * sequence has been completed.
3144  */
3145 static void e100_io_resume(struct pci_dev *pdev)
3146 {
3147  struct net_device *netdev = pci_get_drvdata(pdev);
3148  struct nic *nic = netdev_priv(netdev);
3149 
3150  /* ack any pending wake events, disable PME */
3151  pci_enable_wake(pdev, 0, 0);
3152 
3153  netif_device_attach(netdev);
3154  if (netif_running(netdev)) {
3155  e100_open(netdev);
3156  mod_timer(&nic->watchdog, jiffies);
3157  }
3158 }
3159 
3160 static const struct pci_error_handlers e100_err_handler = {
3161  .error_detected = e100_io_error_detected,
3162  .slot_reset = e100_io_slot_reset,
3163  .resume = e100_io_resume,
3164 };
3165 
3166 static struct pci_driver e100_driver = {
3167  .name = DRV_NAME,
3168  .id_table = e100_id_table,
3169  .probe = e100_probe,
3170  .remove = __devexit_p(e100_remove),
3171 #ifdef CONFIG_PM
3172  /* Power Management hooks */
3173  .suspend = e100_suspend,
3174  .resume = e100_resume,
3175 #endif
3176  .shutdown = e100_shutdown,
3177  .err_handler = &e100_err_handler,
3178 };
3179 
3180 static int __init e100_init_module(void)
3181 {
3182  if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3183  pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3184  pr_info("%s\n", DRV_COPYRIGHT);
3185  }
3186  return pci_register_driver(&e100_driver);
3187 }
3188 
3189 static void __exit e100_cleanup_module(void)
3190 {
3191  pci_unregister_driver(&e100_driver);
3192 }
3193 
3194 module_init(e100_init_module);
3195 module_exit(e100_cleanup_module);