et131x.c
/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *   http://www.agere.com
 *
 * Copyright (c) 2011 Mark Einon <[email protected]>
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <[email protected]>");
MODULE_AUTHOR("Mark Einon <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

/* EEPROM defines */
#define MAX_NUM_REGISTER_POLLS	1000
#define MAX_NUM_WRITE_RETRIES	2

/* MAC defines */
#define COUNTER_WRAP_16_BIT	0x10000
#define COUNTER_WRAP_12_BIT	0x1000

/* PCI defines */
#define INTERNAL_MEM_SIZE	0x400	/* 1024 of internal memory */
#define INTERNAL_MEM_RX_OFFSET	0x1FF	/* 50% Tx, 50% Rx */

/* ISR defines */
/*
 * For interrupts, normal running is:
 *       rxdma_xfr_done, phy_interrupt, mac_stat_interrupt,
 *       watchdog_interrupt & txdma_xfer_done
 *
 * In both cases, when flow control is enabled for either Tx or bi-direction,
 * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the
 * buffer rings are running low.
 */
#define INT_MASK_DISABLE	0xffffffff

/* NOTE: Masking out MAC_STAT Interrupt for now...
 * #define INT_MASK_ENABLE		0xfff6bf17
 * #define INT_MASK_ENABLE_NO_FLOW	0xfff6bfd7
 */
#define INT_MASK_ENABLE		0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW	0xfffebfd7

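/* Note on the mask semantics (a restating of the values above, not new
 * behavior): a 1 bit in these masks disables the corresponding interrupt
 * source, so INT_MASK_DISABLE masks every source, and the two enable
 * values clear only the handful of bits named in the comment above.
 */
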
/* General defines */
/* Packet and header sizes */
#define NIC_MIN_PACKET_SIZE	60

/* Multicast list size */
#define NIC_MAX_MCAST_LIST	128

/* Supported Filters */
#define ET131X_PACKET_TYPE_DIRECTED		0x0001
#define ET131X_PACKET_TYPE_MULTICAST		0x0002
#define ET131X_PACKET_TYPE_BROADCAST		0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS		0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST	0x0010

/* Tx Timeout */
#define ET131X_TX_TIMEOUT	(1 * HZ)
#define NIC_SEND_HANG_THRESHOLD	0

/* MP_TCB flags */
#define fMP_DEST_MULTI			0x00000001
#define fMP_DEST_BROAD			0x00000002

/* MP_ADAPTER flags */
#define fMP_ADAPTER_RECV_LOOKASIDE	0x00000004
#define fMP_ADAPTER_INTERRUPT_IN_USE	0x00000008

/* MP_SHARED flags */
#define fMP_ADAPTER_LOWER_POWER		0x00200000

#define fMP_ADAPTER_NON_RECOVER_ERROR	0x00800000
#define fMP_ADAPTER_HARDWARE_ERROR	0x04000000

#define fMP_ADAPTER_FAIL_SEND_MASK	0x3ff00000

/* Some offsets in PCI config space that are actually used. */
#define ET1310_PCI_MAC_ADDRESS		0xA4
#define ET1310_PCI_EEPROM_STATUS	0xB2
#define ET1310_PCI_ACK_NACK		0xC0
#define ET1310_PCI_REPLAY		0xC2
#define ET1310_PCI_L0L1LATENCY		0xCF

/* PCI Product IDs */
#define ET131X_PCI_DEVICE_ID_GIG	0xED00	/* ET1310 1000 Base-T 8 */
#define ET131X_PCI_DEVICE_ID_FAST	0xED01	/* ET1310 100 Base-T */

/* Define order of magnitude converter */
#define NANO_IN_A_MICRO	1000

#define PARM_RX_NUM_BUFS_DEF	4
#define PARM_RX_TIME_INT_DEF	10
#define PARM_RX_MEM_END_DEF	0x2bc
#define PARM_TX_TIME_INT_DEF	40
#define PARM_TX_NUM_BUFS_DEF	4
#define PARM_DMA_CACHE_DEF	0

/* RX defines */
#define USE_FBR0	1
#define FBR_CHUNKS	32
#define MAX_DESC_PER_RING_RX	1024

/* number of RFDs - default and min */
#ifdef USE_FBR0
#define RFD_LOW_WATER_MARK	40
#define NIC_DEFAULT_NUM_RFD	1024
#define NUM_FBRS		2
#else
#define RFD_LOW_WATER_MARK	20
#define NIC_DEFAULT_NUM_RFD	256
#define NUM_FBRS		1
#endif

#define NIC_MIN_NUM_RFD		64
#define NUM_PACKETS_HANDLED	256

#define ALCATEL_MULTICAST_PKT	0x01000000
#define ALCATEL_BROADCAST_PKT	0x02000000

/* typedefs for Free Buffer Descriptors */
struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;	/* Bits 10-31 reserved, 0-9 descriptor */
};

/* Packet Status Ring Descriptors
 *
 * Word 0:
 *
 * top 16 bits are from the Alcatel Status Word as enumerated in
 * PE-MCXMAC Data Sheet IPD DS54 0210-1 (also IPD-DS80 0205-2)
 *
 * 0: hp			hash pass
 * 1: ipa			IP checksum assist
 * 2: ipp			IP checksum pass
 * 3: tcpa			TCP checksum assist
 * 4: tcpp			TCP checksum pass
 * 5: wol			WOL Event
 * 6: rxmac_error		RXMAC Error Indicator
 * 7: drop			Drop packet
 * 8: ft			Frame Truncated
 * 9: jp			Jumbo Packet
 * 10: vp			VLAN Packet
 * 11-15: unused
 * 16: asw_prev_pkt_dropped	e.g. IFG too small on previous
 * 17: asw_RX_DV_event		short receive event detected
 * 18: asw_false_carrier_event	bad carrier since last good packet
 * 19: asw_code_err		one or more nibbles signalled as errors
 * 20: asw_CRC_err		CRC error
 * 21: asw_len_chk_err		frame length field incorrect
 * 22: asw_too_long		frame length > 1518 bytes
 * 23: asw_OK			valid CRC + no code error
 * 24: asw_multicast		has a multicast address
 * 25: asw_broadcast		has a broadcast address
 * 26: asw_dribble_nibble	spurious bits after EOP
 * 27: asw_control_frame	is a control frame
 * 28: asw_pause_frame		is a pause frame
 * 29: asw_unsupported_op	unsupported OP code
 * 30: asw_VLAN_tag		VLAN tag detected
 * 31: asw_long_evt		Rx long event
 *
 * Word 1:
 * 0-15: length			length in bytes
 * 16-25: bi			Buffer Index
 * 26-27: ri			Ring Index
 * 28-31: reserved
 */

struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};

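/* As an illustrative sketch (these helpers are not part of the original
 * driver), the Word 1 fields documented above unpack from a received
 * descriptor like this; the mask widths follow the bit layout in the
 * comment:
 */
static inline u32 psr_length(const struct pkt_stat_desc *psr)
{
	return psr->word1 & 0xFFFF;		/* bits 0-15: length */
}

static inline u32 psr_buffer_index(const struct pkt_stat_desc *psr)
{
	return (psr->word1 >> 16) & 0x3FF;	/* bits 16-25: buffer index */
}

static inline u32 psr_ring_index(const struct pkt_stat_desc *psr)
{
	return (psr->word1 >> 26) & 0x03;	/* bits 26-27: ring index */
}
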
/* Typedefs for the RX DMA status word */

/*
 * rx status word 0 holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 0 is a 32 bit word
 * which contains the Free Buffer ring 0 and 1 available offset.
 *
 * bit 0-9 FBR1 offset
 * bit 10 Wrap flag for FBR1
 * bit 16-25 FBR0 offset
 * bit 26 Wrap flag for FBR0
 */

/*
 * RXSTAT_WORD1_t structure holds part of the status bits of the Rx DMA engine
 * that get copied out to memory by the ET-1310. Word 3 is a 32 bit word
 * which contains the Packet Status Ring available offset.
 *
 * bit 0-15 reserved
 * bit 16-27 PSRoffset
 * bit 28 PSRwrap
 * bit 29-31 unused
 */

/*
 * struct rx_status_block is a structure representing the status of the Rx
 * DMA engine; it sits in free memory, and is pointed to by 0x101c / 0x1020
 */
struct rx_status_block {
	u32 word0;
	u32 word1;
};

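/* A minimal sketch (not in the original source) of how the word 0 layout
 * described above decodes into the two free-buffer-ring offsets and wrap
 * flags:
 */
static inline u32 rx_status_fbr1_offset(u32 word0)
{
	return word0 & 0x3FF;			/* bits 0-9 */
}

static inline bool rx_status_fbr1_wrap(u32 word0)
{
	return word0 & (1 << 10);		/* bit 10 */
}

static inline u32 rx_status_fbr0_offset(u32 word0)
{
	return (word0 >> 16) & 0x3FF;		/* bits 16-25 */
}

static inline bool rx_status_fbr0_wrap(u32 word0)
{
	return word0 & (1 << 26);		/* bit 26 */
}
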
/*
 * Structure for look-up table holding free buffer ring pointers, addresses
 * and state.
 */
struct fbr_lookup {
	void		*virt[MAX_DESC_PER_RING_RX];
	void		*buffer1[MAX_DESC_PER_RING_RX];
	void		*buffer2[MAX_DESC_PER_RING_RX];
	u32		 bus_high[MAX_DESC_PER_RING_RX];
	u32		 bus_low[MAX_DESC_PER_RING_RX];
	void		*ring_virtaddr;
	dma_addr_t	 ring_physaddr;
	void		*mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t	 mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u64		 real_physaddr;
	u64		 offset;
	u32		 local_full;
	u32		 num_entries;
	u32		 buffsize;
};

/*
 * struct rx_ring is the structure representing the adapter's local
 * reference(s) to the rings
 *
 ******************************************************************************
 * IMPORTANT NOTE :- fbr_lookup *fbr[NUM_FBRS] uses index 0 to refer to FBR1
 * and index 1 to refer to FBR0
 ******************************************************************************
 */
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_num_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	/* RECV */
	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;

	/* lookaside lists */
	struct kmem_cache *recv_lookaside;
};

/* TX defines */
/*
 * word 2 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0-15: length of packet
 * 16-27: VLAN tag
 * 28: VLAN CFI
 * 29-31: VLAN priority
 *
 * word 3 of the control bits in the Tx Descriptor ring for the ET-1310
 *
 * 0: last packet in the sequence
 * 1: first packet in the sequence
 * 2: interrupt the processor when this pkt sent
 * 3: Control word - no packet data
 * 4: Issue half-duplex backpressure : XON/XOFF
 * 5: send pause frame
 * 6: Tx frame has error
 * 7: append CRC
 * 8: MAC override
 * 9: pad packet
 * 10: Packet is a Huge packet
 * 11: append VLAN tag
 * 12: IP checksum assist
 * 13: TCP checksum assist
 * 14: UDP checksum assist
 */

/* struct tx_desc represents each descriptor on the ring */
struct tx_desc {
	u32 addr_hi;	/* data buffer address high */
	u32 addr_lo;	/* data buffer address low */
	u32 len_vlan;	/* control words how to xmit the */
	u32 flags;	/* data (detailed above) */
};

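/* Hedged example (tx_desc_fill_example is illustrative, not part of the
 * driver): filling one tx_desc for a single-fragment frame using the
 * word 2 / word 3 layouts documented above. The bit positions are taken
 * directly from that comment.
 */
static inline void tx_desc_fill_example(struct tx_desc *desc,
					dma_addr_t dma, u32 len)
{
	desc->addr_hi  = (u32)((u64)dma >> 32);
	desc->addr_lo  = (u32)dma;
	desc->len_vlan = len & 0xFFFF;	/* bits 0-15: packet length */
	desc->flags    = (1 << 0) |	/* last packet in the sequence */
			 (1 << 1) |	/* first packet in the sequence */
			 (1 << 2);	/* interrupt when this pkt is sent */
}
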
/*
 * The status of the Tx DMA engine; it sits in free memory, and is pointed to
 * by 0x101c / 0x1020. This is a DMA10 type
 */

/* TCB (Transmit Control Block: Host Side) */
struct tcb {
	struct tcb *next;	/* Next entry in ring */
	u32 flags;		/* Our flags for the packet */
	u32 count;		/* Used to spot stuck/lost packets */
	u32 stale;		/* Used to spot stuck/lost packets */
	struct sk_buff *skb;	/* Network skb we are tied to */
	u32 index;		/* Ring indexes */
	u32 index_start;
};

/* Structure representing our local reference(s) to the ring */
struct tx_ring {
	/* TCB (Transmit Control Block) memory and lists */
	struct tcb *tcb_ring;

	/* List of TCBs that are ready to be used */
	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	/* list of TCBs that are currently being sent. NOTE that access to all
	 * three of these (including used) are controlled via the
	 * TCBSendQLock. This lock should be secured prior to incrementing /
	 * decrementing used, or any queue manipulation on send_head /
	 * tail
	 */
	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	/* The actual descriptor ring */
	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	/* send_idx indicates where we last wrote to in the descriptor ring. */
	u32 send_idx;

	/* The location of the write-back status block */
	u32 *tx_status;
	dma_addr_t tx_status_pa;

	/* Packets since the last IRQ: used for interrupt coalescing */
	int since_irq;
};

/*
 * Do not change these values: if changed, then change also in respective
 * TXdma and Rxdma engines
 */
#define NUM_DESC_PER_RING_TX	512	/* TX Do not change these values */
#define NUM_TCB			64

/*
 * These values are all superseded by registry entries to facilitate tuning.
 * Once the desired performance has been achieved, the optimal registry values
 * should be re-populated to these #defines:
 */
#define TX_ERROR_PERIOD		1000

#define LO_MARK_PERCENT_FOR_PSR	15
#define LO_MARK_PERCENT_FOR_RX	15

/* RFD (Receive Frame Descriptor) */
struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;	/* total size of receive frame */
	u16 bufferindex;
	u8 ringindex;
};

/* Flow Control */
#define FLOW_BOTH	0
#define FLOW_TXONLY	1
#define FLOW_RXONLY	2
#define FLOW_NONE	3

/* Struct to define some device statistics */
struct ce_stats {
	/* MIB II variables
	 *
	 * NOTE: atomic_t types are only guaranteed to store 24-bits; if we
	 * MUST have 32, then we'll need another way to perform atomic
	 * operations
	 */
	u32		unicast_pkts_rcvd;
	atomic_t	unicast_pkts_xmtd;
	u32		multicast_pkts_rcvd;
	atomic_t	multicast_pkts_xmtd;
	u32		broadcast_pkts_rcvd;
	atomic_t	broadcast_pkts_xmtd;
	u32		rcvd_pkts_dropped;

	/* Tx Statistics. */
	u32		tx_underflows;

	u32		tx_collisions;
	u32		tx_excessive_collisions;
	u32		tx_first_collisions;
	u32		tx_late_collisions;
	u32		tx_max_pkt_errs;
	u32		tx_deferred;

	/* Rx Statistics. */
	u32		rx_overflows;

	u32		rx_length_errs;
	u32		rx_align_errs;
	u32		rx_crc_errs;
	u32		rx_code_violations;
	u32		rx_other_errs;

	u32		synchronous_iterations;
	u32		interrupt_status;
};

/* The private adapter structure */
struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct phy_device *phydev;
	struct work_struct task;

	/* Flags that indicate current state of the adapter */
	u32 flags;

	/* local link state, to determine if a state change has occurred */
	int link;

	/* Configuration */
	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	/* Spinlocks */
	spinlock_t lock;

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t send_hw_lock;

	spinlock_t rcv_lock;
	spinlock_t rcv_pend_lock;
	spinlock_t fbr_lock;

	spinlock_t phy_lock;

	/* Packet Filter and look ahead size */
	u32 packet_filter;

	/* multicast list */
	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	/* Pointer to the device's PCI register space */
	struct address_map __iomem *regs;

	/* Registry parameters */
	u8 wanted_flow;		/* Flow we want for 802.3x flow control */
	u32 registry_jumbo_packet;	/* Max supported ethernet packet size */

	/* Derived from the registry: */
	u8 flowcontrol;		/* flow control validated by the far-end */

	/* Minimize init-time */
	struct timer_list error_timer;

	/* variable putting the phy into coma mode when boot up with no cable
	 * plugged in after 5 seconds
	 */
	u8 boot_coma;

	/* Next two used to save power information at power down. This
	 * information will be used during power up to set up parts of Power
	 * Management in JAGCore
	 */
	u16 pdown_speed;
	u8 pdown_duplex;

	/* Tx Memory Variables */
	struct tx_ring tx_ring;

	/* Rx Memory Variables */
	struct rx_ring rx_ring;

	/* Stats */
	struct ce_stats stats;

	struct net_device_stats net_stats;
};

static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	/*
	 * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and
	 *    bits 7, 1:0 all equal to 1, at least once after reset.
	 *    Subsequent operations need only to check that bits 1:0 are equal
	 *    to 1 prior to starting a single byte read/write
	 */

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		/* Read registers grouped in DWORD1 */
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		/* I2C idle and Phy Queue Avail both true */
		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

/**
 * eeprom_write - Write a byte to the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address to write
 * @data: the value to write
 *
 * Returns 0 on success, -EIO on failure.
 */
static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int i2c_wack = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	/*
	 * For an EEPROM, an I2C single byte write is defined as a START
	 * condition followed by the device address, EEPROM address, one byte
	 * of data and a STOP condition. The STOP condition will trigger the
	 * EEPROM's internally timed write cycle to the nonvolatile memory.
	 * All inputs are disabled during this write cycle and the EEPROM will
	 * not respond to any access until the internal write is complete.
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;

	/*
	 * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0,
	 *    and bits 1:0 both =0. Bit 5 should be set according to the
	 *    type of EEPROM being accessed (1=two byte addressing, 0=one
	 *    byte addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
			LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	i2c_wack = 1;

	/* Prepare EEPROM address for Step 3 */

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		/* Write the address to the LBCIF Address Register */
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;
		/*
		 * Write the data to the LBCIF Data Register (the I2C write
		 * will begin).
		 */
		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;
		/*
		 * Monitor bit 1:0 of the LBCIF Status Register. When bits
		 * 1:0 are both equal to 1, the I2C write has completed and the
		 * internal write cycle of the EEPROM is about to start.
		 * (bits 1:0 = 01 is a legal state while waiting from both
		 * equal to 1, but bits 1:0 = 10 is invalid and implies that
		 * something is broken).
		 */
		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		/*
		 * Check bit 3 of the LBCIF Status Register. If equal to 1,
		 * an error has occurred. Don't break here if we are revision
		 * 1, this is so we do a blind write for load bug.
		 */
		if ((status & LBCIF_STATUS_GENERAL_ERROR)
			&& adapter->pdev->revision == 0)
			break;

		/*
		 * Check bit 2 of the LBCIF Status Register. If equal to 1 an
		 * ACK error has occurred on the address phase of the write.
		 * This could be due to an actual hardware failure or the
		 * EEPROM may still be in its internal write cycle from a
		 * previous write. This write operation was ignored and must be
		 * repeated later.
		 */
		if (status & LBCIF_STATUS_ACK_ERROR) {
			/*
			 * This could be due to an actual hardware failure
			 * or the EEPROM may still be in its internal write
			 * cycle from a previous write. This write operation
			 * was ignored and must be repeated later.
			 */
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	/*
	 * Set bit 6 of the LBCIF Control Register = 0.
	 */
	udelay(10);

	while (i2c_wack) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		/* Do read until internal ACK_ERROR goes away meaning write
		 * completed
		 */
		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

/**
 * eeprom_read - Read a byte from the ET1310's EEPROM
 * @adapter: pointer to our private adapter structure
 * @addr: the address from which to read
 * @pdata: a pointer to a byte in which to store the value of the read
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	/*
	 * A single byte read is similar to the single byte write, with the
	 * exception of the data flow:
	 */

	err = eeprom_wait_ready(pdev, NULL);
	if (err)
		return err;
	/*
	 * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0,
	 * and bits 1:0 both =0. Bit 5 should be set according to the type
	 * of EEPROM being accessed (1=two byte addressing, 0=one byte
	 * addressing).
	 */
	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;
	/*
	 * Write the address to the LBCIF Address Register (I2C read will
	 * begin).
	 */
	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;
	/*
	 * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read
	 * is complete. (if bit 1 = 1 and bit 0 stays = 0, a hardware failure
	 * has occurred).
	 */
	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;
	/*
	 * Regardless of error status, read data byte from LBCIF Data
	 * Register.
	 */
	*pdata = err;
	/*
	 * Check bit 2 of the LBCIF Status Register. If = 1,
	 * then an error has occurred.
	 */
	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	/* We first need to check the EEPROM Status code located at offset
	 * 0xB2 of config space
	 */
	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS,
			     &eestatus);

	/* THIS IS A WORKAROUND:
	 * I need to call this function twice to get my card in a
	 * LG M1 Express Dual running. I tried also a msleep before this
	 * function, because I thought there could be some timing conditions,
	 * but it didn't work. Calling the whole function twice also works.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Determine if the error(s) we care about are present. If they are
	 * present we need to fail.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;
		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Re-write the first 4 bytes if we have an eeprom
			 * present and the revision id is 1, this fixes the
			 * corruption seen with 1310 B Silicon
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			/* This error could mean that there was an error
			 * reading the eeprom or that the eeprom doesn't exist.
			 * We will treat each case the same and not try to
			 * gather additional information that normally would
			 * come from the eeprom, like MAC Address
			 */
			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	/* Read the EEPROM for information regarding LED behavior. Refer to
	 * ET1310_phy.c, et131x_xcvr_init(), for its use.
	 */
	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		/* Disable all optional features */
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

/**
 * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the receive dma configuration register for normal operation */
	u32 csr = 0x2000;	/* FBR1 enable */

	if (adapter->rx_ring.fbr[0]->buffsize == 4096)
		csr |= 0x0800;
	else if (adapter->rx_ring.fbr[0]->buffsize == 8192)
		csr |= 0x1000;
	else if (adapter->rx_ring.fbr[0]->buffsize == 16384)
		csr |= 0x1800;
#ifdef USE_FBR0
	csr |= 0x0400;		/* FBR0 enable */
	if (adapter->rx_ring.fbr[1]->buffsize == 256)
		csr |= 0x0100;
	else if (adapter->rx_ring.fbr[1]->buffsize == 512)
		csr |= 0x0200;
	else if (adapter->rx_ring.fbr[1]->buffsize == 1024)
		csr |= 0x0300;
#endif
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) != 0) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) != 0) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
	writel(0x00002001, &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if ((csr & 0x00020000) == 0) {	/* Check halt status (bit 17) */
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if ((csr & 0x00020000) == 0)
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
					&adapter->regs->txdma.csr);
}

static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

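/* Worked example (a sketch, assuming the definitions in et131x.h where
 * INDEX10(x) masks to the low 10 bits and ET_DMA10_WRAP is bit 10): the
 * helper advances only the 10-bit ring index and carries the caller's
 * wrap flag through unchanged; it is left to the caller to flip the flag
 * when the index actually passes the end of the ring.
 *
 *	u32 v = 0x3FF;		// index 1023, wrap flag clear
 *	add_10bit(&v, 2);	// v == 0x001: index wrapped, flag kept
 *
 *	v = ET_DMA10_WRAP | 5;	// index 5, wrap flag set
 *	add_10bit(&v, 1);	// v == (ET_DMA10_WRAP | 6)
 */
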
/**
 * et1310_config_mac_regs1 - Initialize the first part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	/* First we need to reset everything. Write to MAC configuration
	 * register 1 to perform reset.
	 */
	writel(0xC00F0000, &macregs->cfg1);

	/* Next lets configure the MAC Inter-packet gap register */
	ipg = 0x38005860;	/* IPG1 0x38 IPG2 0x58 B2B 0x60 */
	ipg |= 0x50 << 8;	/* ifg enforce 0x50 */
	writel(ipg, &macregs->ipg);

	/* Next lets configure the MAC Half Duplex register */
	/* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */
	writel(0x00A1F037, &macregs->hfdp);

	/* Next lets configure the MAC Interface Control register */
	writel(0, &macregs->if_ctrl);

	/* Let's move on to setting up the mii management configuration */
	writel(0x07, &macregs->mii_mgmt_cfg);	/* Clock reset 0x7 */

	/* Next lets configure the MAC Station Address register. These
	 * values are read from the EEPROM during initialization and stored
	 * in the adapter structure. We write what is stored in the adapter
	 * structure to the MAC Station Address registers high and low. This
	 * station address is used for generating and checking pause control
	 * packets.
	 */
	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		    adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	/* Max ethernet packet in bytes that will be passed by the mac without
	 * being truncated. Allow the MAC to pass 4 more than our max packet
	 * size. This is 4 for the Ethernet CRC.
	 *
	 * Packets larger than (registry_jumbo_packet) that do not contain a
	 * VLAN ID will be dropped by the Rx function.
	 */
	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	/* clear out MAC config reset */
	writel(0, &macregs->cfg1);
}

/**
 * et1310_config_mac_regs2 - Initialize the second part of MAC regs
 * @adapter: pointer to our adapter structure
 */
static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	/* Set up the if mode bits */
	cfg2 &= ~0x300;
	if (phydev && phydev->speed == SPEED_1000) {
		cfg2 |= 0x200;
		/* Phy mode bit */
		ifctrl &= ~(1 << 24);
	} else {
		cfg2 |= 0x100;
		ifctrl |= (1 << 24);
	}

	/* We need to enable Rx/Tx */
	cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW;
	/* Initialize loop back to off */
	cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW);
	if (adapter->flowcontrol == FLOW_RXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		cfg1 |= CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	/* Now we need to initialize the MAC Configuration 2 register */
	/* preamble 7, check length, huge frame off, pad crc, crc enable
	 * full duplex off
	 */
	cfg2 |= 0x7016;
	cfg2 &= ~0x0021;

	/* Turn on duplex if needed */
	if (phydev && phydev->duplex == DUPLEX_FULL)
		cfg2 |= 0x01;

	ifctrl &= ~(1 << 26);
	if (phydev && phydev->duplex == DUPLEX_HALF)
		ifctrl |= (1 << 26);	/* Enable ghd */

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	/* Enable txmac */
	ctl |= 0x09;	/* TX mac enable, FC disable */
	writel(ctl, &adapter->regs->txmac.ctl);

	/* Ready to start the RXDMA/TXDMA engine */
	if (!(adapter->flags & fMP_ADAPTER_LOWER_POWER)) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

/**
 * et1310_in_phy_coma - check if the device is in phy coma mode
 * @adapter: pointer to our adapter structure
 *
 * Returns 0 or 1 to indicate whether the device is in phy coma.
 */
static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
	 * the multi-cast LIST. If it is NOT specified, (and "ALL" is not
	 * specified) then we should pass NO multi-cast addresses to the
	 * driver.
	 */
	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		/* Loop through our multicast array and set up the device */
		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	/* Write out the new hash to the device */
	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

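/* Illustrative helper (et131x_hash_bucket_example is hypothetical, not in
 * the original driver) showing the bucket math used above: seven bits of
 * the Ethernet CRC of the address (bits 23-29) select one of 128 hash
 * bits, spread across the four 32-bit multicast hash registers.
 */
static inline void et131x_hash_bucket_example(const u8 *mac,
					      int *reg, int *bit)
{
	u32 result = (ether_crc(6, mac) & 0x3F800000) >> 23;

	*reg = result / 32;	/* 0..3 selects multi_hash1..multi_hash4 */
	*bit = result % 32;	/* bit position within that register */
}
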
static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	/* Set up unicast packet filter reg 3 to be the first two octets of
	 * the MAC address for both addresses
	 *
	 * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the
	 * MAC address for second address
	 *
	 * Set up unicast packet filter reg 1 to be the octets 2 - 5 of the
	 * MAC address for first address
	 */
	uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) |
		   adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) |
		   adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) |
		   adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

1188 {
1189  struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
1190  struct phy_device *phydev = adapter->phydev;
1191  u32 sa_lo;
1192  u32 sa_hi = 0;
1193  u32 pf_ctrl = 0;
1194 
1195  /* Disable the MAC while it is being configured (also disable WOL) */
1196  writel(0x8, &rxmac->ctrl);
1197 
1198  /* Initialize WOL to disabled. */
1199  writel(0, &rxmac->crc0);
1200  writel(0, &rxmac->crc12);
1201  writel(0, &rxmac->crc34);
1202 
1203  /* We need to set the WOL mask0 - mask4 next. We initialize it to
1204  * its default Values of 0x00000000 because there are not WOL masks
1205  * as of this time.
1206  */
1207  writel(0, &rxmac->mask0_word0);
1208  writel(0, &rxmac->mask0_word1);
1209  writel(0, &rxmac->mask0_word2);
1210  writel(0, &rxmac->mask0_word3);
1211 
1212  writel(0, &rxmac->mask1_word0);
1213  writel(0, &rxmac->mask1_word1);
1214  writel(0, &rxmac->mask1_word2);
1215  writel(0, &rxmac->mask1_word3);
1216 
1217  writel(0, &rxmac->mask2_word0);
1218  writel(0, &rxmac->mask2_word1);
1219  writel(0, &rxmac->mask2_word2);
1220  writel(0, &rxmac->mask2_word3);
1221 
1222  writel(0, &rxmac->mask3_word0);
1223  writel(0, &rxmac->mask3_word1);
1224  writel(0, &rxmac->mask3_word2);
1225  writel(0, &rxmac->mask3_word3);
1226 
1227  writel(0, &rxmac->mask4_word0);
1228  writel(0, &rxmac->mask4_word1);
1229  writel(0, &rxmac->mask4_word2);
1230  writel(0, &rxmac->mask4_word3);
1231 
1232  /* Lets setup the WOL Source Address */
1233  sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) |
1234  (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) |
1235  (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) |
1236  adapter->addr[5];
1237  writel(sa_lo, &rxmac->sa_lo);
1238 
1239  sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) |
1240  adapter->addr[1];
1241  writel(sa_hi, &rxmac->sa_hi);
1242 
1243  /* Disable all Packet Filtering */
1244  writel(0, &rxmac->pf_ctrl);
1245 
1246  /* Let's initialize the Unicast Packet filtering address */
1247  if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
1248  et1310_setup_device_for_unicast(adapter);
1249  pf_ctrl |= 4; /* Unicast filter */
1250  } else {
1251  writel(0, &rxmac->uni_pf_addr1);
1252  writel(0, &rxmac->uni_pf_addr2);
1253  writel(0, &rxmac->uni_pf_addr3);
1254  }
1255 
1256  /* Let's initialize the Multicast hash */
1257  if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
1258  pf_ctrl |= 2; /* Multicast filter */
1259  et1310_setup_device_for_multicast(adapter);
1260  }
1261 
1262  /* Runt packet filtering. Didn't work in version A silicon. */
1263  pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16;
1264  pf_ctrl |= 8; /* Fragment filter */
1265 
1266  if (adapter->registry_jumbo_packet > 8192)
1267  /* In order to transmit jumbo packets greater than 8k, the
1268  * FIFO between RxMAC and RxDMA needs to be reduced in size
1269  * to (16k - Jumbo packet size). In order to implement this,
1270  * we must use "cut through" mode in the RxMAC, which chops
1271  * packets down into segments which are (max_size * 16). In
1272  * this case we selected 256 bytes, since this is the size of
1273  * the PCI-Express TLP's that the 1310 uses.
1274  *
1275  * seg_en on, fc_en off, size 0x10
1276  */
1277  writel(0x41, &rxmac->mcif_ctrl_max_seg);
1278  else
1279  writel(0, &rxmac->mcif_ctrl_max_seg);
1280 
1281  /* Initialize the MCIF water marks */
1282  writel(0, &rxmac->mcif_water_mark);
1283 
1284  /* Initialize the MIF control */
1285  writel(0, &rxmac->mif_ctrl);
1286 
1287  /* Initialize the Space Available Register */
1288  writel(0, &rxmac->space_avail);
1289 
1290  /* Initialize the the mif_ctrl register
1291  * bit 3: Receive code error. One or more nibbles were signaled as
1292  * errors during the reception of the packet. Clear this
1293  * bit in Gigabit, set it in 100Mbit. This was derived
1294  * experimentally at UNH.
1295  * bit 4: Receive CRC error. The packet's CRC did not match the
1296  * internally generated CRC.
1297  * bit 5: Receive length check error. Indicates that frame length
1298  * field value in the packet does not match the actual data
1299  * byte length and is not a type field.
1300  * bit 16: Receive frame truncated.
1301  * bit 17: Drop packet enable
1302  */
1303  if (phydev && phydev->speed == SPEED_100)
1304  writel(0x30038, &rxmac->mif_ctrl);
1305  else
1306  writel(0x30030, &rxmac->mif_ctrl);
1307 
1308  /* Finally we initialize RxMac to be enabled & WOL disabled. Packet
1309  * filter is always enabled since it is where the runt packets are
1310  * supposed to be dropped. For version A silicon, runt packet
1311  * dropping doesn't work, so it is disabled in the pf_ctrl register,
1312  * but we still leave the packet filter on.
1313  */
1314  writel(pf_ctrl, &rxmac->pf_ctrl);
1315  writel(0x9, &rxmac->ctrl);
1316 }
1317 
static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	/* We need to update the Control Frame Parameters
	 * cfpt - control frame pause timer set to 64 (0x40)
	 * cfep - control frame extended pause timer set to 0x0
	 */
	if (adapter->flowcontrol == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	/* Next we need to initialize all the macstat registers to zero on
	 * the device.
	 */
	writel(0, &macstat->txrx_0_64_byte_frames);
	writel(0, &macstat->txrx_65_127_byte_frames);
	writel(0, &macstat->txrx_128_255_byte_frames);
	writel(0, &macstat->txrx_256_511_byte_frames);
	writel(0, &macstat->txrx_512_1023_byte_frames);
	writel(0, &macstat->txrx_1024_1518_byte_frames);
	writel(0, &macstat->txrx_1519_1522_gvln_frames);

	writel(0, &macstat->rx_bytes);
	writel(0, &macstat->rx_packets);
	writel(0, &macstat->rx_fcs_errs);
	writel(0, &macstat->rx_multicast_packets);
	writel(0, &macstat->rx_broadcast_packets);
	writel(0, &macstat->rx_control_frames);
	writel(0, &macstat->rx_pause_frames);
	writel(0, &macstat->rx_unknown_opcodes);
	writel(0, &macstat->rx_align_errs);
	writel(0, &macstat->rx_frame_len_errs);
	writel(0, &macstat->rx_code_errs);
	writel(0, &macstat->rx_carrier_sense_errs);
	writel(0, &macstat->rx_undersize_packets);
	writel(0, &macstat->rx_oversize_packets);
	writel(0, &macstat->rx_fragment_packets);
	writel(0, &macstat->rx_jabbers);
	writel(0, &macstat->rx_drops);

	writel(0, &macstat->tx_bytes);
	writel(0, &macstat->tx_packets);
	writel(0, &macstat->tx_multicast_packets);
	writel(0, &macstat->tx_broadcast_packets);
	writel(0, &macstat->tx_pause_frames);
	writel(0, &macstat->tx_deferred);
	writel(0, &macstat->tx_excessive_deferred);
	writel(0, &macstat->tx_single_collisions);
	writel(0, &macstat->tx_multiple_collisions);
	writel(0, &macstat->tx_late_collisions);
	writel(0, &macstat->tx_excessive_collisions);
	writel(0, &macstat->tx_total_collisions);
	writel(0, &macstat->tx_pause_honored_frames);
	writel(0, &macstat->tx_drops);
	writel(0, &macstat->tx_jabbers);
	writel(0, &macstat->tx_fcs_errs);
	writel(0, &macstat->tx_control_frames);
	writel(0, &macstat->tx_oversize_frames);
	writel(0, &macstat->tx_undersize_frames);
	writel(0, &macstat->tx_fragments);
	writel(0, &macstat->carry_reg1);
	writel(0, &macstat->carry_reg2);

	/* Unmask any counters that we want to track the overflow of.
	 * Initially this will be all counters. It may become clear later
	 * that we do not need to track all counters.
	 */
	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}

/**
 * et131x_phy_mii_read - Read from the PHY through the MII interface on the MAC
 * @adapter: pointer to our private adapter structure
 * @addr: the address of the transceiver
 * @reg: the register to read
 * @value: pointer to a 16-bit value in which the value will be stored
 */
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to read from on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_WAIT) && delay < 50);

	/* If we hit the max delay, we could not read the register */
	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
	}

	/* If we hit here we were able to read the register and we need to
	 * return the value to the caller
	 */
	*value = readl(&mac->mii_mgmt_stat) & 0xFFFF;

	/* Stop the read operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->addr, reg, value);
}

/**
 * et131x_mii_write - Write to a PHY register through the MII interface of the MAC
 * @adapter: pointer to our private adapter structure
 * @reg: the register to write
 * @value: the value to write to the register
 *
 * Returns 0 on success, -EIO on failure.
 */
static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/* Still used from _mac for BIT_READ */
static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
				      u16 action, u16 regnum, u16 bitnum,
				      u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		break;
	}
}

static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flowcontrol = FLOW_NONE;
	} else {
		u8 remote_pause, remote_async_pause;

		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 10,
					  &remote_pause);
		et1310_phy_access_mii_bit(adapter,
					  TRUEPHY_BIT_READ, 5, 11,
					  &remote_async_pause);

		if ((remote_pause == TRUEPHY_BIT_SET) &&
		    (remote_async_pause == TRUEPHY_BIT_SET)) {
			adapter->flowcontrol = adapter->wanted_flow;
		} else if ((remote_pause == TRUEPHY_BIT_SET) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_BOTH;
			else
				adapter->flowcontrol = FLOW_NONE;
		} else if ((remote_pause == TRUEPHY_BIT_CLEAR) &&
			   (remote_async_pause == TRUEPHY_BIT_CLEAR)) {
			adapter->flowcontrol = FLOW_NONE;
		} else {/* if (remote_pause == TRUEPHY_CLEAR_BIT &&
			   remote_async_pause == TRUEPHY_SET_BIT) */
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flowcontrol = FLOW_RXONLY;
			else
				adapter->flowcontrol = FLOW_NONE;
		}
	}
}

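/* Summary of the resolution above (a restatement of the code, not new
 * behavior): at full duplex, the link partner's PAUSE (MII reg 5, bit 10)
 * and ASYM_PAUSE (reg 5, bit 11) advertisement bits combine with what we
 * wanted as follows:
 *
 *	pause	asym	resulting flowcontrol
 *	  1	  1	wanted_flow as-is
 *	  1	  0	FLOW_BOTH if wanted, else FLOW_NONE
 *	  0	  0	FLOW_NONE
 *	  0	  1	FLOW_RXONLY if FLOW_BOTH wanted, else FLOW_NONE
 */
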
/**
 * et1310_update_macstat_host_counters - Update local copy of the statistics
 * @adapter: pointer to the adapter structure
 *
 * Update the local copy of the statistics held in the adapter structure
 * from the hardware MAC_STAT counter registers.
 */
static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}

/**
 * et1310_handle_macstat_interrupt - Handle a MAC_STAT interrupt
 * @adapter: pointer to the adapter structure
 *
 * One of the MACSTAT counters has wrapped. Update the local copy of
 * the statistics held in the adapter structure.
 */
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	/* Read the interrupt bits from the register(s). These are Clear On
	 * Write.
	 */
	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	/* We need to update the host copy of all the MAC_STAT counters.
	 * For each counter, check its overflow bit. If the overflow bit is
	 * set, then increment the host version of the count by one complete
	 * revolution of the counter. This routine is called when the counter
	 * block indicates that one of the counters has wrapped.
	 */
	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

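/* Worked example of the wrap accounting above (illustrative): a 16-bit
 * counter that rolls over from 0xFFFF to 0x0000 raises its carry bit,
 * and adding COUNTER_WRAP_16_BIT (0x10000) to the host copy restores the
 * 0x10000 events the truncated hardware register can no longer represent;
 * 12-bit counters are compensated the same way with COUNTER_WRAP_12_BIT
 * (0x1000).
 */
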
static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;
	else
		return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, reg, value);
}

static int et131x_mdio_reset(struct mii_bus *bus)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_mii_write(adapter, MII_BMCR, BMCR_RESET);

	return 0;
}

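/* A minimal sketch (not from this section; the actual hookup happens in
 * the driver's PCI setup code elsewhere in et131x.c) of how these three
 * callbacks attach to a struct mii_bus before registration:
 *
 *	bus->priv  = netdev;
 *	bus->read  = et131x_mdio_read;
 *	bus->write = et131x_mdio_write;
 *	bus->reset = et131x_mdio_reset;
 *	err = mdiobus_register(bus);
 */
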
/**
 * et1310_phy_power_down - power down or power up the PHY
 * @adapter: pointer to our private adapter structure
 * @down: true to power the PHY down, false to power it back up
 */
static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 */
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 imr;
	u16 isr;
	u16 lcr2;

	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only. Bad behavior when link status
	 * and auto neg are set, we run into a nested interrupt problem
	 */
	imr |= (ET_PHY_INT_MASK_AUTONEGSTAT |
		ET_PHY_INT_MASK_LINKSTAT |
		ET_PHY_INT_MASK_DISABLE);

	et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr);

	/* Set the LED behavior such that LED 1 indicates speed (off =
	 * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
	 * link and activity (on for link, blink off for activity).
	 *
	 * NOTE: Some customizations have been added here for specific
	 * vendors; The LED behavior is now determined by vendor data in the
	 * EEPROM. However, the above description is the default.
	 */
	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, PHY_LED_2, lcr2);
	}
}

/**
 * et131x_configure_global_regs - configure JAGCore global regs
 * @adapter: pointer to our adapter structure
 *
 * Used to configure the global registers on the JAGCore
 */
static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}

1851 static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
1852 {
1853  struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
1854  struct rx_ring *rx_local = &adapter->rx_ring;
1855  struct fbr_desc *fbr_entry;
1856  u32 entry;
1857  u32 psr_num_des;
1858  unsigned long flags;
1859 
1860  /* Halt RXDMA to perform the reconfigure. */
1861  et131x_rx_dma_disable(adapter);
1862 
1863  /* Load the completion writeback physical address
1864  *
1865  * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
1866  * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
1867  * are ever returned, make sure the high part is retrieved here
1868  * before storing the adjusted address.
1869  */
1870  writel((u32) ((u64)rx_local->rx_status_bus >> 32),
1871  &rx_dma->dma_wb_base_hi);
1872  writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);
1873 
1874  memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));
1875 
1876  /* Set the address and parameters of the packet status ring into the
1877  * 1310's registers
1878  */
1879  writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
1880  &rx_dma->psr_base_hi);
1881  writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
1882  writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
1883  writel(0, &rx_dma->psr_full_offset);
1884 
1885  psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
1886  writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
1887  &rx_dma->psr_min_des);
1888 
1889  spin_lock_irqsave(&adapter->rcv_lock, flags);
1890 
1891  /* These local variables track the PSR in the adapter structure */
1892  rx_local->local_psr_full = 0;
1893 
1894  /* Now's the best time to initialize FBR1 contents */
1895  fbr_entry = (struct fbr_desc *) rx_local->fbr[0]->ring_virtaddr;
1896  for (entry = 0; entry < rx_local->fbr[0]->num_entries; entry++) {
1897  fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
1898  fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
1899  fbr_entry->word2 = entry;
1900  fbr_entry++;
1901  }
1902 
1903  /* Set the address and parameters of Free buffer ring 1 (and 0 if
1904  * required) into the 1310's registers
1905  */
1906  writel((u32) (rx_local->fbr[0]->real_physaddr >> 32),
1907  &rx_dma->fbr1_base_hi);
1908  writel((u32) rx_local->fbr[0]->real_physaddr, &rx_dma->fbr1_base_lo);
1909  writel(rx_local->fbr[0]->num_entries - 1, &rx_dma->fbr1_num_des);
1911 
1912  /* This variable tracks the free buffer ring 1 full position, so it
1913  * has to match the above.
1914  */
1915  rx_local->fbr[0]->local_full = ET_DMA10_WRAP;
1916  writel(
1917  ((rx_local->fbr[0]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1918  &rx_dma->fbr1_min_des);
1919 
1920 #ifdef USE_FBR0
1921  /* Now's the best time to initialize FBR0 contents */
1922  fbr_entry = (struct fbr_desc *) rx_local->fbr[1]->ring_virtaddr;
1923  for (entry = 0; entry < rx_local->fbr[1]->num_entries; entry++) {
1924  fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
1925  fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
1926  fbr_entry->word2 = entry;
1927  fbr_entry++;
1928  }
1929 
1930  writel((u32) (rx_local->fbr[1]->real_physaddr >> 32),
1931  &rx_dma->fbr0_base_hi);
1932  writel((u32) rx_local->fbr[1]->real_physaddr, &rx_dma->fbr0_base_lo);
1933  writel(rx_local->fbr[1]->num_entries - 1, &rx_dma->fbr0_num_des);
1934  writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);
1935 
1936  /* This variable tracks the free buffer ring 0 full position, so it
1937  * has to match the above.
1938  */
1939  rx_local->fbr[1]->local_full = ET_DMA10_WRAP;
1940  writel(
1941  ((rx_local->fbr[1]->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
1942  &rx_dma->fbr0_min_des);
1943 #endif
1944 
1945  /* Program the number of packets we will receive before generating an
1946  * interrupt.
1947  * For version B silicon, this value gets updated once autoneg is
1948  * complete.
1949  */
1950  writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);
1951 
1952  /* The "time_done" is not working correctly to coalesce interrupts
1953  * after a given time period, but rather is giving us an interrupt
1954  * regardless of whether we have received packets.
1955  * This value gets updated once autoneg is complete.
1956  */
1957  writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);
1958 
1959  spin_unlock_irqrestore(&adapter->rcv_lock, flags);
1960 }
1961 
1969 static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
1970 {
1971  struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
1972 
1973  /* Load the hardware with the start of the transmit descriptor ring. */
1974  writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
1975  &txdma->pr_base_hi);
1976  writel((u32) adapter->tx_ring.tx_desc_ring_pa,
1977  &txdma->pr_base_lo);
1978 
1979  /* Initialise the transmit DMA engine */
1980  writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);
1981 
1982  /* Load the completion writeback physical address */
1983  writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
1984  &txdma->dma_wb_base_hi);
1985  writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
1986 
1987  *adapter->tx_ring.tx_status = 0;
1988 
1989  writel(0, &txdma->service_request);
1990  adapter->tx_ring.send_idx = 0;
1991 }
1992 
1999 static void et131x_adapter_setup(struct et131x_adapter *adapter)
2000 {
2001  /* Configure the JAGCore */
2002  et131x_configure_global_regs(adapter);
2003 
2004  et1310_config_mac_regs1(adapter);
2005 
2006  /* Configure the MMC registers */
2007  /* All we need to do is initialize the Memory Control Register */
2008  writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);
2009 
2010  et1310_config_rxmac_regs(adapter);
2011  et1310_config_txmac_regs(adapter);
2012 
2013  et131x_config_rx_dma_regs(adapter);
2014  et131x_config_tx_dma_regs(adapter);
2015 
2016  et1310_config_macstat_regs(adapter);
2017 
2018  et1310_phy_power_down(adapter, 0);
2019  et131x_xcvr_init(adapter);
2020 }
2021 
2026 static void et131x_soft_reset(struct et131x_adapter *adapter)
2027 {
2028  /* Disable MAC Core */
2029  writel(0xc00f0000, &adapter->regs->mac.cfg1);
2030 
2031  /* Set everything to a reset value */
2032  writel(0x7F, &adapter->regs->global.sw_reset);
2033  writel(0x000f0000, &adapter->regs->mac.cfg1);
2034  writel(0x00000000, &adapter->regs->mac.cfg1);
2035 }
2036 
2044 static void et131x_enable_interrupts(struct et131x_adapter *adapter)
2045 {
2046  u32 mask;
2047 
2048  /* Enable all global interrupts */
2049  if (adapter->flowcontrol == FLOW_TXONLY ||
2050  adapter->flowcontrol == FLOW_BOTH)
2051  mask = INT_MASK_ENABLE;
2052  else
2053  mask = INT_MASK_ENABLE_NO_FLOW;
2054 
2055  writel(mask, &adapter->regs->global.int_mask);
2056 }
2057 
2064 static void et131x_disable_interrupts(struct et131x_adapter *adapter)
2065 {
2066  /* Disable all global interrupts */
2067  writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
2068 }
2069 
2074 static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
2075 {
2076  /* Setup the transmit dma configuration register */
2077  writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
2078  &adapter->regs->txdma.csr);
2079 }
2080 
2085 static void et131x_enable_txrx(struct net_device *netdev)
2086 {
2087  struct et131x_adapter *adapter = netdev_priv(netdev);
2088 
2089  /* Enable the Tx and Rx DMA engines (if not already enabled) */
2090  et131x_rx_dma_enable(adapter);
2091  et131x_tx_dma_enable(adapter);
2092 
2093  /* Enable device interrupts */
2094  if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)
2095  et131x_enable_interrupts(adapter);
2096 
2097  /* We're ready to move some data, so start the queue */
2098  netif_start_queue(netdev);
2099 }
2100 
2105 static void et131x_disable_txrx(struct net_device *netdev)
2106 {
2107  struct et131x_adapter *adapter = netdev_priv(netdev);
2108 
2109  /* First thing is to stop the queue */
2110  netif_stop_queue(netdev);
2111 
2112  /* Stop the Tx and Rx DMA engines */
2113  et131x_rx_dma_disable(adapter);
2114  et131x_tx_dma_disable(adapter);
2115 
2116  /* Disable device interrupts */
2117  et131x_disable_interrupts(adapter);
2118 }
2119 
2124 static void et131x_init_send(struct et131x_adapter *adapter)
2125 {
2126  struct tcb *tcb;
2127  u32 ct;
2128  struct tx_ring *tx_ring;
2129 
2130  /* Setup some convenience pointers */
2131  tx_ring = &adapter->tx_ring;
2132  tcb = adapter->tx_ring.tcb_ring;
2133 
2134  tx_ring->tcb_qhead = tcb;
2135 
2136  memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
2137 
2138  /* Go through and set up each TCB */
2139  for (ct = 0; ct++ < NUM_TCB; tcb++)
2140  /* Set the link pointer in HW TCB to the next TCB in the
2141  * chain
2142  */
2143  tcb->next = tcb + 1;
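/* Note on the loop above: it executes NUM_TCB times, so the last TCB is
 * briefly left pointing one element past the end of the array; the
 * tcb-- and tcb->next = NULL below patch that final link and record
 * the tail.
 */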
2144 
2145  /* Set the tail pointer */
2146  tcb--;
2147  tx_ring->tcb_qtail = tcb;
2148  tcb->next = NULL;
2149  /* Curr send queue should now be empty */
2150  tx_ring->send_head = NULL;
2151  tx_ring->send_tail = NULL;
2152 }
2153 
2174 static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
2175 {
2176  unsigned long flags;
2177  u32 pmcsr;
2178 
2179  pmcsr = readl(&adapter->regs->global.pm_csr);
2180 
2181  /* Save the GbE PHY speed and duplex modes. Need to restore this
2182  * when cable is plugged back in
2183  */
2184  /*
2185  * TODO - when PM is re-enabled, check if we need to
2186  * perform a similar task as this -
2187  * adapter->pdown_speed = adapter->ai_force_speed;
2188  * adapter->pdown_duplex = adapter->ai_force_duplex;
2189  */
2190 
2191  /* Stop sending packets. */
2192  spin_lock_irqsave(&adapter->send_hw_lock, flags);
2193  adapter->flags |= fMP_ADAPTER_LOWER_POWER;
2194  spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
2195 
2196  /* Wait for outstanding Receive packets */
2197 
2198  et131x_disable_txrx(adapter->netdev);
2199 
2200  /* Gate off JAGCore 3 clock domains */
2201  pmcsr &= ~ET_PMCSR_INIT;
2202  writel(pmcsr, &adapter->regs->global.pm_csr);
2203 
2204  /* Program gigE PHY in to Coma mode */
2205  pmcsr |= ET_PM_PHY_SW_COMA;
2206  writel(pmcsr, &adapter->regs->global.pm_csr);
2207 }
2208 
2213 static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
2214 {
2215  u32 pmcsr;
2216 
2217  pmcsr = readl(&adapter->regs->global.pm_csr);
2218 
2219  /* Disable phy_sw_coma register and re-enable JAGCore clocks */
2220  pmcsr |= ET_PMCSR_INIT;
2221  pmcsr &= ~ET_PM_PHY_SW_COMA;
2222  writel(pmcsr, &adapter->regs->global.pm_csr);
2223 
2224  /* Restore the GbE PHY speed and duplex modes;
2225  * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
2226  */
2227  /* TODO - when PM is re-enabled, check if we need to
2228  * perform a similar task as this -
2229  * adapter->ai_force_speed = adapter->pdown_speed;
2230  * adapter->ai_force_duplex = adapter->pdown_duplex;
2231  */
2232 
2233  /* Re-initialize the send structures */
2234  et131x_init_send(adapter);
2235 
2236  /* Bring the device back to the state it was during init prior to
2237  * autonegotiation being complete. This way, when we get the auto-neg
2238  * complete interrupt, we can complete init by calling ConfigMacREGS2.
2239  */
2240  et131x_soft_reset(adapter);
2241 
2242  /* setup et1310 as per the documentation ?? */
2243  et131x_adapter_setup(adapter);
2244 
2245  /* Allow Tx to restart */
2246  adapter->flags &= ~fMP_ADAPTER_LOWER_POWER;
2247 
2248  et131x_enable_txrx(adapter->netdev);
2249 }
2250 
2251 static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
2252 {
2253  u32 tmp_free_buff_ring = *free_buff_ring;
2254  tmp_free_buff_ring++;
2255  /* This works for all cases where limit < 1024. The 1023 case
2256  works because 1023++ is 1024 which means the if condition is not
2257  taken but the carry of the bit into the wrap bit toggles the wrap
2258  value correctly */
2259  if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
2260  tmp_free_buff_ring &= ~ET_DMA10_MASK;
2261  tmp_free_buff_ring ^= ET_DMA10_WRAP;
2262  }
2263  /* For the 1023 case */
2264  tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP);
2265  *free_buff_ring = tmp_free_buff_ring;
2266  return tmp_free_buff_ring;
2267 }
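/* A worked example of the wrap arithmetic above, assuming the driver's
 * usual 10-bit ring encoding (ET_DMA10_MASK = 0x3ff, ET_DMA10_WRAP =
 * 0x400): with limit = 1023 and *free_buff_ring = 0x3ff (index 1023,
 * wrap clear), the increment gives 0x400 - the carry lands directly in
 * the wrap bit, (0x400 & 0x3ff) = 0 is not > limit, and the final mask
 * leaves 0x400 (index 0, wrap set). From 0x7ff (index 1023, wrap set)
 * the increment gives 0x800, which the final mask reduces to 0x000
 * (index 0, wrap clear) - the wrap bit has toggled again.
 */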
2268 
2276 static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
2277  u64 *phys_addr, u64 *offset,
2278  u64 mask)
2279 {
2280  u64 new_addr = *phys_addr & ~mask;
2281 
2282  *offset = 0;
2283 
2284  if (new_addr != *phys_addr) {
2285  /* Move to next aligned block */
2286  new_addr += mask + 1;
2287  /* Return offset for adjusting virt addr */
2288  *offset = new_addr - *phys_addr;
2289  /* Return new physical address */
2290  *phys_addr = new_addr;
2291  }
2292 }
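/* Example with hypothetical values: *phys_addr = 0x12345678 and
 * mask = 0xfff gives new_addr = 0x12345000, which differs from the
 * original, so the function bumps to the next 4k boundary at
 * 0x12346000 and returns *offset = 0x988. The caller adds the same
 * offset to the corresponding virtual address so virt and phys stay
 * in step.
 */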
2293 
2303 static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
2304 {
2305  u32 i, j;
2306  u32 bufsize;
2307  u32 pktstat_ringsize, fbr_chunksize;
2308  struct rx_ring *rx_ring;
2309 
2310  /* Setup some convenience pointers */
2311  rx_ring = &adapter->rx_ring;
2312 
2313  /* Alloc memory for the lookup table */
2314 #ifdef USE_FBR0
2315  rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2316 #endif
2317  rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL);
2318 
2319  /* The first thing we will do is configure the sizes of the buffer
2320  * rings. These will change based on jumbo packet support. Larger
2321  * jumbo packets increase the size of each entry in FBR0, and the
2322  * number of entries in FBR0, while at the same time decreasing the
2323  * number of entries in FBR1.
2324  *
2325  * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1
2326  * entries are huge in order to accommodate a "jumbo" frame, then it
2327  * will have fewer entries. Conversely, FBR0 is then relied upon to
2328  * carry more "normal" frames; thus its entry size also increases
2329  * and its number of entries goes up too (since it now carries
2330  * "small" + "regular" packets).
2331  *
2332  * In this scheme, we try to maintain 512 entries between the two
2333  * rings. Also, FBR1 stays roughly constant in total size - when its
2334  * entry size doubles, its number of entries halves. FBR0 grows, however.
2335  */
2336 
2337  if (adapter->registry_jumbo_packet < 2048) {
2338 #ifdef USE_FBR0
2339  rx_ring->fbr[1]->buffsize = 256;
2340  rx_ring->fbr[1]->num_entries = 512;
2341 #endif
2342  rx_ring->fbr[0]->buffsize = 2048;
2343  rx_ring->fbr[0]->num_entries = 512;
2344  } else if (adapter->registry_jumbo_packet < 4096) {
2345 #ifdef USE_FBR0
2346  rx_ring->fbr[1]->buffsize = 512;
2347  rx_ring->fbr[1]->num_entries = 1024;
2348 #endif
2349  rx_ring->fbr[0]->buffsize = 4096;
2350  rx_ring->fbr[0]->num_entries = 512;
2351  } else {
2352 #ifdef USE_FBR0
2353  rx_ring->fbr[1]->buffsize = 1024;
2354  rx_ring->fbr[1]->num_entries = 768;
2355 #endif
2356  rx_ring->fbr[0]->buffsize = 16384;
2357  rx_ring->fbr[0]->num_entries = 128;
2358  }
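/* Summarizing the ladder above (per-entry buffer size / number of
 * entries; FBR0 is only present when USE_FBR0 is defined):
 *
 *   jumbo setting     FBR0 (small)      FBR1 (large)
 *   < 2048            256  / 512        2048  / 512
 *   < 4096            512  / 1024       4096  / 512
 *   otherwise         1024 / 768        16384 / 128
 */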
2359 
2360 #ifdef USE_FBR0
2361  adapter->rx_ring.psr_num_entries =
2362  adapter->rx_ring.fbr[1]->num_entries +
2363  adapter->rx_ring.fbr[0]->num_entries;
2364 #else
2365  adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr[0]->num_entries;
2366 #endif
2367 
2368  /* Allocate an area of memory for Free Buffer Ring 1 */
2369  bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2370  0xfff;
2371  rx_ring->fbr[0]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2372  bufsize,
2373  &rx_ring->fbr[0]->ring_physaddr,
2374  GFP_KERNEL);
2375  if (!rx_ring->fbr[0]->ring_virtaddr) {
2376  dev_err(&adapter->pdev->dev,
2377  "Cannot alloc memory for Free Buffer Ring 1\n");
2378  return -ENOMEM;
2379  }
2380 
2381  /* Save physical address
2382  *
2383  * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2384  * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2385  * are ever returned, make sure the high part is retrieved here
2386  * before storing the adjusted address.
2387  */
2388  rx_ring->fbr[0]->real_physaddr = rx_ring->fbr[0]->ring_physaddr;
2389 
2390  /* Align Free Buffer Ring 1 on a 4K boundary */
2391  et131x_align_allocated_memory(adapter,
2392  &rx_ring->fbr[0]->real_physaddr,
2393  &rx_ring->fbr[0]->offset, 0x0FFF);
2394 
2395  rx_ring->fbr[0]->ring_virtaddr =
2396  (void *)((u8 *) rx_ring->fbr[0]->ring_virtaddr +
2397  rx_ring->fbr[0]->offset);
2398 
2399 #ifdef USE_FBR0
2400  /* Allocate an area of memory for Free Buffer Ring 0 */
2401  bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2402  0xfff;
2403  rx_ring->fbr[1]->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2404  bufsize,
2405  &rx_ring->fbr[1]->ring_physaddr,
2406  GFP_KERNEL);
2407  if (!rx_ring->fbr[1]->ring_virtaddr) {
2408  dev_err(&adapter->pdev->dev,
2409  "Cannot alloc memory for Free Buffer Ring 0\n");
2410  return -ENOMEM;
2411  }
2412 
2413  /* Save physical address
2414  *
2415  * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
2416  * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2417  * are ever returned, make sure the high part is retrieved here before
2418  * storing the adjusted address.
2419  */
2420  rx_ring->fbr[1]->real_physaddr = rx_ring->fbr[1]->ring_physaddr;
2421 
2422  /* Align Free Buffer Ring 0 on a 4K boundary */
2423  et131x_align_allocated_memory(adapter,
2424  &rx_ring->fbr[1]->real_physaddr,
2425  &rx_ring->fbr[1]->offset, 0x0FFF);
2426 
2427  rx_ring->fbr[1]->ring_virtaddr =
2428  (void *)((u8 *) rx_ring->fbr[1]->ring_virtaddr +
2429  rx_ring->fbr[1]->offset);
2430 #endif
2431  for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
2432  u64 fbr1_tmp_physaddr;
2433  u64 fbr1_offset;
2434  u32 fbr1_align;
2435 
2436  /* This code allocates an area of memory big enough for N
2437  * free buffers + (buffer_size - 1) so that the buffers can
2438  * be aligned on 4k boundaries. If each buffer were aligned
2439  * to a buffer_size boundary, the effect would be to double
2440  * the size of FBR0. By allocating N buffers at once, we
2441  * reduce this overhead.
2442  */
2443  if (rx_ring->fbr[0]->buffsize > 4096)
2444  fbr1_align = 4096;
2445  else
2446  fbr1_align = rx_ring->fbr[0]->buffsize;
2447 
2448  fbr_chunksize =
2449  (FBR_CHUNKS * rx_ring->fbr[0]->buffsize) + fbr1_align - 1;
2450  rx_ring->fbr[0]->mem_virtaddrs[i] =
2451  dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2452  &rx_ring->fbr[0]->mem_physaddrs[i],
2453  GFP_KERNEL);
2454 
2455  if (!rx_ring->fbr[0]->mem_virtaddrs[i]) {
2456  dev_err(&adapter->pdev->dev,
2457  "Could not alloc memory\n");
2458  return -ENOMEM;
2459  }
2460 
2461  /* See NOTE in "Save Physical Address" comment above */
2462  fbr1_tmp_physaddr = rx_ring->fbr[0]->mem_physaddrs[i];
2463 
2464  et131x_align_allocated_memory(adapter,
2465  &fbr1_tmp_physaddr,
2466  &fbr1_offset, (fbr1_align - 1));
2467 
2468  for (j = 0; j < FBR_CHUNKS; j++) {
2469  u32 index = (i * FBR_CHUNKS) + j;
2470 
2471  /* Save the Virtual address of this index for quick
2472  * access later
2473  */
2474  rx_ring->fbr[0]->virt[index] =
2475  (u8 *) rx_ring->fbr[0]->mem_virtaddrs[i] +
2476  (j * rx_ring->fbr[0]->buffsize) + fbr1_offset;
2477 
2478  /* now store the physical address in the descriptor
2479  * so the device can access it
2480  */
2481  rx_ring->fbr[0]->bus_high[index] =
2482  (u32) (fbr1_tmp_physaddr >> 32);
2483  rx_ring->fbr[0]->bus_low[index] =
2484  (u32) fbr1_tmp_physaddr;
2485 
2486  fbr1_tmp_physaddr += rx_ring->fbr[0]->buffsize;
2487 
2488  rx_ring->fbr[0]->buffer1[index] =
2489  rx_ring->fbr[0]->virt[index];
2490  rx_ring->fbr[0]->buffer2[index] =
2491  rx_ring->fbr[0]->virt[index] - 4;
2492  }
2493  }
2494 
2495 #ifdef USE_FBR0
2496  /* Same for FBR0 (if in use) */
2497  for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
2498  u64 fbr0_tmp_physaddr;
2499  u64 fbr0_offset;
2500 
2501  fbr_chunksize =
2502  ((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
2503  rx_ring->fbr[1]->mem_virtaddrs[i] =
2504  dma_alloc_coherent(&adapter->pdev->dev, fbr_chunksize,
2505  &rx_ring->fbr[1]->mem_physaddrs[i],
2506  GFP_KERNEL);
2507 
2508  if (!rx_ring->fbr[1]->mem_virtaddrs[i]) {
2509  dev_err(&adapter->pdev->dev,
2510  "Could not alloc memory\n");
2511  return -ENOMEM;
2512  }
2513 
2514  /* See NOTE in "Save Physical Address" comment above */
2515  fbr0_tmp_physaddr = rx_ring->fbr[1]->mem_physaddrs[i];
2516 
2517  et131x_align_allocated_memory(adapter,
2518  &fbr0_tmp_physaddr,
2519  &fbr0_offset,
2520  rx_ring->fbr[1]->buffsize - 1);
2521 
2522  for (j = 0; j < FBR_CHUNKS; j++) {
2523  u32 index = (i * FBR_CHUNKS) + j;
2524 
2525  rx_ring->fbr[1]->virt[index] =
2526  (u8 *) rx_ring->fbr[1]->mem_virtaddrs[i] +
2527  (j * rx_ring->fbr[1]->buffsize) + fbr0_offset;
2528 
2529  rx_ring->fbr[1]->bus_high[index] =
2530  (u32) (fbr0_tmp_physaddr >> 32);
2531  rx_ring->fbr[1]->bus_low[index] =
2532  (u32) fbr0_tmp_physaddr;
2533 
2534  fbr0_tmp_physaddr += rx_ring->fbr[1]->buffsize;
2535 
2536  rx_ring->fbr[1]->buffer1[index] =
2537  rx_ring->fbr[1]->virt[index];
2538  rx_ring->fbr[1]->buffer2[index] =
2539  rx_ring->fbr[1]->virt[index] - 4;
2540  }
2541  }
2542 #endif
2543 
2544  /* Allocate an area of memory for FIFO of Packet Status ring entries */
2545  pktstat_ringsize =
2546  sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries;
2547 
2548  rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
2549  pktstat_ringsize,
2550  &rx_ring->ps_ring_physaddr,
2551  GFP_KERNEL);
2552 
2553  if (!rx_ring->ps_ring_virtaddr) {
2554  dev_err(&adapter->pdev->dev,
2555  "Cannot alloc memory for Packet Status Ring\n");
2556  return -ENOMEM;
2557  }
2558  pr_info("Packet Status Ring %llx\n",
2559  (unsigned long long) rx_ring->ps_ring_physaddr);
2560 
2561  /*
2562  * NOTE : dma_alloc_coherent(), used above to alloc DMA regions,
2563  * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
2564  * are ever returned, make sure the high part is retrieved here before
2565  * storing the adjusted address.
2566  */
2567 
2568  /* Allocate an area of memory for writeback of status information */
2569  rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
2570  sizeof(struct rx_status_block),
2571  &rx_ring->rx_status_bus,
2572  GFP_KERNEL);
2573  if (!rx_ring->rx_status_block) {
2574  dev_err(&adapter->pdev->dev,
2575  "Cannot alloc memory for Status Block\n");
2576  return -ENOMEM;
2577  }
2578  rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;
2579  pr_info("PRS %llx\n", (unsigned long long)rx_ring->rx_status_bus);
2580 
2581  /* Recv
2582  * kmem_cache_create initializes a lookaside list. After successful
2583  * creation, nonpaged fixed-size blocks can be allocated from and
2584  * freed to the lookaside list.
2585  * RFDs will be allocated from this pool.
2586  */
2587  rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name,
2588  sizeof(struct rfd),
2589  0,
2590  SLAB_CACHE_DMA |
2591  SLAB_HWCACHE_ALIGN,
2592  NULL);
2593 
2594  adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE;
2595 
2596  /* The RFDs are going to be put on lists later on, so initialize the
2597  * lists now.
2598  */
2599  INIT_LIST_HEAD(&rx_ring->recv_list);
2600  return 0;
2601 }
2602 
2607 static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
2608 {
2609  u32 index;
2610  u32 bufsize;
2611  u32 pktstat_ringsize;
2612  struct rfd *rfd;
2613  struct rx_ring *rx_ring;
2614 
2615  /* Setup some convenience pointers */
2616  rx_ring = &adapter->rx_ring;
2617 
2618  /* Free RFDs and associated packet descriptors */
2619  WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);
2620 
2621  while (!list_empty(&rx_ring->recv_list)) {
2622  rfd = (struct rfd *) list_entry(rx_ring->recv_list.next,
2623  struct rfd, list_node);
2624 
2625  list_del(&rfd->list_node);
2626  rfd->skb = NULL;
2627  kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd);
2628  }
2629 
2630  /* Free Free Buffer Ring 1 */
2631  if (rx_ring->fbr[0]->ring_virtaddr) {
2632  /* First the packet memory */
2633  for (index = 0; index <
2634  (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); index++) {
2635  if (rx_ring->fbr[0]->mem_virtaddrs[index]) {
2636  u32 fbr1_align;
2637 
2638  if (rx_ring->fbr[0]->buffsize > 4096)
2639  fbr1_align = 4096;
2640  else
2641  fbr1_align = rx_ring->fbr[0]->buffsize;
2642 
2643  bufsize =
2644  (rx_ring->fbr[0]->buffsize * FBR_CHUNKS) +
2645  fbr1_align - 1;
2646 
2647  dma_free_coherent(&adapter->pdev->dev,
2648  bufsize,
2649  rx_ring->fbr[0]->mem_virtaddrs[index],
2650  rx_ring->fbr[0]->mem_physaddrs[index]);
2651 
2652  rx_ring->fbr[0]->mem_virtaddrs[index] = NULL;
2653  }
2654  }
2655 
2656  /* Now the FIFO itself */
2657  rx_ring->fbr[0]->ring_virtaddr = (void *)((u8 *)
2658  rx_ring->fbr[0]->ring_virtaddr - rx_ring->fbr[0]->offset);
2659 
2660  bufsize =
2661  (sizeof(struct fbr_desc) * rx_ring->fbr[0]->num_entries) +
2662  0xfff;
2663 
2664  dma_free_coherent(&adapter->pdev->dev, bufsize,
2665  rx_ring->fbr[0]->ring_virtaddr,
2666  rx_ring->fbr[0]->ring_physaddr);
2667 
2668  rx_ring->fbr[0]->ring_virtaddr = NULL;
2669  }
2670 
2671 #ifdef USE_FBR0
2672  /* Now the same for Free Buffer Ring 0 */
2673  if (rx_ring->fbr[1]->ring_virtaddr) {
2674  /* First the packet memory */
2675  for (index = 0; index <
2676  (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); index++) {
2677  if (rx_ring->fbr[1]->mem_virtaddrs[index]) {
2678  bufsize =
2679  (rx_ring->fbr[1]->buffsize *
2680  (FBR_CHUNKS + 1)) - 1;
2681 
2682  dma_free_coherent(&adapter->pdev->dev,
2683  bufsize,
2684  rx_ring->fbr[1]->mem_virtaddrs[index],
2685  rx_ring->fbr[1]->mem_physaddrs[index]);
2686 
2687  rx_ring->fbr[1]->mem_virtaddrs[index] = NULL;
2688  }
2689  }
2690 
2691  /* Now the FIFO itself */
2692  rx_ring->fbr[1]->ring_virtaddr = (void *)((u8 *)
2693  rx_ring->fbr[1]->ring_virtaddr - rx_ring->fbr[1]->offset);
2694 
2695  bufsize =
2696  (sizeof(struct fbr_desc) * rx_ring->fbr[1]->num_entries) +
2697  0xfff;
2698 
2699  dma_free_coherent(&adapter->pdev->dev,
2700  bufsize,
2701  rx_ring->fbr[1]->ring_virtaddr,
2702  rx_ring->fbr[1]->ring_physaddr);
2703 
2704  rx_ring->fbr[1]->ring_virtaddr = NULL;
2705  }
2706 #endif
2707 
2708  /* Free Packet Status Ring */
2709  if (rx_ring->ps_ring_virtaddr) {
2710  pktstat_ringsize =
2711  sizeof(struct pkt_stat_desc) *
2712  adapter->rx_ring.psr_num_entries;
2713 
2714  dma_free_coherent(&adapter->pdev->dev, pktstat_ringsize,
2715  rx_ring->ps_ring_virtaddr,
2716  rx_ring->ps_ring_physaddr);
2717 
2718  rx_ring->ps_ring_virtaddr = NULL;
2719  }
2720 
2721  /* Free area of memory for the writeback of status information */
2722  if (rx_ring->rx_status_block) {
2723  dma_free_coherent(&adapter->pdev->dev,
2724  sizeof(struct rx_status_block),
2725  rx_ring->rx_status_block, rx_ring->rx_status_bus);
2726  rx_ring->rx_status_block = NULL;
2727  }
2728 
2729  /* Destroy the lookaside (RFD) pool */
2730  if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
2731  kmem_cache_destroy(rx_ring->recv_lookaside);
2732  adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
2733  }
2734 
2735  /* Free the FBR Lookup Table */
2736 #ifdef USE_FBR0
2737  kfree(rx_ring->fbr[1]);
2738 #endif
2739 
2740  kfree(rx_ring->fbr[0]);
2741 
2742  /* Reset Counters */
2743  rx_ring->num_ready_recv = 0;
2744 }
2745 
2752 static int et131x_init_recv(struct et131x_adapter *adapter)
2753 {
2754  int status = -ENOMEM;
2755  struct rfd *rfd = NULL;
2756  u32 rfdct;
2757  u32 numrfd = 0;
2758  struct rx_ring *rx_ring;
2759 
2760  /* Setup some convenience pointers */
2761  rx_ring = &adapter->rx_ring;
2762 
2763  /* Setup each RFD */
2764  for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
2765  rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
2766  GFP_ATOMIC | GFP_DMA);
2767 
2768  if (!rfd) {
2769  dev_err(&adapter->pdev->dev,
2770  "Couldn't alloc RFD out of kmem_cache\n");
2771  status = -ENOMEM;
2772  continue;
2773  }
2774 
2775  rfd->skb = NULL;
2776 
2777  /* Add this RFD to the recv_list */
2778  list_add_tail(&rfd->list_node, &rx_ring->recv_list);
2779 
2780  /* Increment both the available RFDs, and the total RFDs. */
2781  rx_ring->num_ready_recv++;
2782  numrfd++;
2783  }
2784 
2785  if (numrfd > NIC_MIN_NUM_RFD)
2786  status = 0;
2787 
2788  rx_ring->num_rfd = numrfd;
2789 
2790  if (status != 0) {
2791  kmem_cache_free(rx_ring->recv_lookaside, rfd);
2792  dev_err(&adapter->pdev->dev,
2793  "Allocation problems in et131x_init_recv\n");
2794  }
2795  return status;
2796 }
2797 
2802 static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
2803 {
2804  struct phy_device *phydev = adapter->phydev;
2805 
2806  if (!phydev)
2807  return;
2808 
2809  /* For version B silicon, we do not use the RxDMA timer for 10 and 100
2810  * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing.
2811  */
2812  if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
2813  writel(0, &adapter->regs->rxdma.max_pkt_time);
2814  writel(1, &adapter->regs->rxdma.num_pkt_done);
2815  }
2816 }
2817 
2823 static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
2824 {
2825  struct rx_ring *rx_local = &adapter->rx_ring;
2826  struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
2827  u16 buff_index = rfd->bufferindex;
2828  u8 ring_index = rfd->ringindex;
2829  unsigned long flags;
2830 
2831  /* We don't use any of the OOB data besides status. Otherwise, we
2832  * need to clean up OOB data
2833  */
2834  if (
2835 #ifdef USE_FBR0
2836  (ring_index == 0 && buff_index < rx_local->fbr[1]->num_entries) ||
2837 #endif
2838  (ring_index == 1 && buff_index < rx_local->fbr[0]->num_entries)) {
2839  spin_lock_irqsave(&adapter->fbr_lock, flags);
2840 
2841  if (ring_index == 1) {
2842  struct fbr_desc *next = (struct fbr_desc *)
2843  (rx_local->fbr[0]->ring_virtaddr) +
2844  INDEX10(rx_local->fbr[0]->local_full);
2845 
2846  /* Handle the Free Buffer Ring advancement here. Write
2847  * the PA / Buffer Index for the returned buffer into
2848  * the oldest (next to be freed)FBR entry
2849  */
2850  next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
2851  next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
2852  next->word2 = buff_index;
2853 
2854  writel(bump_free_buff_ring(
2855  &rx_local->fbr[0]->local_full,
2856  rx_local->fbr[0]->num_entries - 1),
2857  &rx_dma->fbr1_full_offset);
2858  }
2859 #ifdef USE_FBR0
2860  else {
2861  struct fbr_desc *next = (struct fbr_desc *)
2862  rx_local->fbr[1]->ring_virtaddr +
2863  INDEX10(rx_local->fbr[1]->local_full);
2864 
2865  /* Handle the Free Buffer Ring advancement here. Write
2866  * the PA / Buffer Index for the returned buffer into
2867  * the oldest (next to be freed) FBR entry
2868  */
2869  next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
2870  next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
2871  next->word2 = buff_index;
2872 
2873  writel(bump_free_buff_ring(
2874  &rx_local->fbr[1]->local_full,
2875  rx_local->fbr[1]->num_entries - 1),
2876  &rx_dma->fbr0_full_offset);
2877  }
2878 #endif
2879  spin_unlock_irqrestore(&adapter->fbr_lock, flags);
2880  } else {
2881  dev_err(&adapter->pdev->dev,
2882  "%s illegal Buffer Index returned\n", __func__);
2883  }
2884 
2885  /* The processing on this RFD is done, so put it back on the tail of
2886  * our list
2887  */
2888  spin_lock_irqsave(&adapter->rcv_lock, flags);
2889  list_add_tail(&rfd->list_node, &rx_local->recv_list);
2890  rx_local->num_ready_recv++;
2891  spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2892 
2893  WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
2894 }
2895 
2907 static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
2908 {
2909  struct rx_ring *rx_local = &adapter->rx_ring;
2910  struct rx_status_block *status;
2911  struct pkt_stat_desc *psr;
2912  struct rfd *rfd;
2913  u32 i;
2914  u8 *buf;
2915  unsigned long flags;
2916  struct list_head *element;
2917  u8 ring_index;
2918  u16 buff_index;
2919  u32 len;
2920  u32 word0;
2921  u32 word1;
2922 
2923  /* RX Status block is written by the DMA engine prior to every
2924  * interrupt. It contains the next to be used entry in the Packet
2925  * Status Ring, and also the two Free Buffer rings.
2926  */
2927  status = rx_local->rx_status_block;
2928  word1 = status->word1 >> 16; /* Get the useful bits */
2929 
2930  /* Check the PSR and wrap bits do not match */
2931  if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
2932  /* Looks like this ring is not updated yet */
2933  return NULL;
2934 
2935  /* The packet status ring indicates that data is available. */
2936  psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) +
2937  (rx_local->local_psr_full & 0xFFF);
2938 
2939  /* Grab any information that is required once the PSR is
2940  * advanced, since we can no longer rely on the memory being
2941  * accurate
2942  */
2943  len = psr->word1 & 0xFFFF;
2944  ring_index = (psr->word1 >> 26) & 0x03;
2945  buff_index = (psr->word1 >> 16) & 0x3FF;
2946  word0 = psr->word0;
2947 
2948  /* Indicate that we have used this PSR entry. */
2949  /* FIXME wrap 12 */
2950  add_12bit(&rx_local->local_psr_full, 1);
2951  if (
2952  (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) {
2953  /* Clear psr full and toggle the wrap bit */
2954  rx_local->local_psr_full &= ~0xFFF;
2955  rx_local->local_psr_full ^= 0x1000;
2956  }
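/* Example of the 12-bit wrap above: with psr_num_entries = 1024 and
 * local_psr_full = 0x3ff (entry 1023, wrap clear), add_12bit() gives
 * 0x400, which exceeds psr_num_entries - 1, so the low 12 bits are
 * cleared and bit 0x1000 is toggled, yielding 0x1000 (entry 0, wrap
 * set). The wrap bit is what lets the earlier comparison against the
 * status block's word1 distinguish "empty" from "completely full".
 */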
2957 
2958  writel(rx_local->local_psr_full,
2959  &adapter->regs->rxdma.psr_full_offset);
2960 
2961 #ifndef USE_FBR0
2962  if (ring_index != 1)
2963  return NULL;
2964 #endif
2965 
2966 #ifdef USE_FBR0
2967  if (ring_index > 1 ||
2968  (ring_index == 0 &&
2969  buff_index > rx_local->fbr[1]->num_entries - 1) ||
2970  (ring_index == 1 &&
2971  buff_index > rx_local->fbr[0]->num_entries - 1)) {
2972 #else
2973  if (ring_index != 1 || buff_index > rx_local->fbr[0]->num_entries - 1) {
2974 #endif
2975  /* Illegal buffer or ring index cannot be used by S/W*/
2976  dev_err(&adapter->pdev->dev,
2977  "NICRxPkts PSR Entry %d indicates "
2978  "length of %d and/or bad bi(%d)\n",
2979  rx_local->local_psr_full & 0xFFF,
2980  len, buff_index);
2981  return NULL;
2982  }
2983 
2984  /* Get and fill the RFD. */
2985  spin_lock_irqsave(&adapter->rcv_lock, flags);
2986 
2987  rfd = NULL;
2988  element = rx_local->recv_list.next;
2989  rfd = (struct rfd *) list_entry(element, struct rfd, list_node);
2990 
2991  if (rfd == NULL) {
2992  spin_unlock_irqrestore(&adapter->rcv_lock, flags);
2993  return NULL;
2994  }
2995 
2996  list_del(&rfd->list_node);
2997  rx_local->num_ready_recv--;
2998 
2999  spin_unlock_irqrestore(&adapter->rcv_lock, flags);
3000 
3001  rfd->bufferindex = buff_index;
3002  rfd->ringindex = ring_index;
3003 
3004  /* In V1 silicon, there is a bug which screws up filtering of
3005  * runt packets. Therefore runt packet filtering is disabled
3006  * in the MAC and the packets are dropped here. They are
3007  * also counted here.
3008  */
3009  if (len < (NIC_MIN_PACKET_SIZE + 4)) {
3010  adapter->stats.rx_other_errs++;
3011  len = 0;
3012  }
3013 
3014  if (len) {
3015  /* Determine if this is a multicast packet coming in */
3016  if ((word0 & ALCATEL_MULTICAST_PKT) &&
3017  !(word0 & ALCATEL_BROADCAST_PKT)) {
3018  /* Promiscuous mode and Multicast mode are
3019  * not mutually exclusive as was first
3020  * thought. I guess Promiscuous is just
3021  * considered a super-set of the other
3022  * filters. Generally filter is 0x2b when in
3023  * promiscuous mode.
3024  */
3025  if ((adapter->packet_filter &
3026  ET131X_PACKET_TYPE_MULTICAST)
3027  && !(adapter->packet_filter &
3028  ET131X_PACKET_TYPE_PROMISCUOUS)
3029  && !(adapter->packet_filter &
3030  ET131X_PACKET_TYPE_ALL_MULTICAST)) {
3031  /*
3032  * Note - ring_index for fbr[] array is reversed
3033  * 1 for FBR0 etc
3034  */
3035  buf = rx_local->fbr[(ring_index == 0 ? 1 : 0)]->
3036  virt[buff_index];
3037 
3038  /* Loop through our list to see if the
3039  * destination address of this packet
3040  * matches one in our list.
3041  */
3042  for (i = 0; i < adapter->multicast_addr_count;
3043  i++) {
3044  if (buf[0] ==
3045  adapter->multicast_list[i][0]
3046  && buf[1] ==
3047  adapter->multicast_list[i][1]
3048  && buf[2] ==
3049  adapter->multicast_list[i][2]
3050  && buf[3] ==
3051  adapter->multicast_list[i][3]
3052  && buf[4] ==
3053  adapter->multicast_list[i][4]
3054  && buf[5] ==
3055  adapter->multicast_list[i][5]) {
3056  break;
3057  }
3058  }
3059 
3060  /* If our index is equal to the number
3061  * of Multicast address we have, then
3062  * this means we did not find this
3063  * packet's matching address in our
3064  * list. Set the len to zero,
3065  * so we free our RFD when we return
3066  * from this function.
3067  */
3068  if (i == adapter->multicast_addr_count)
3069  len = 0;
3070  }
3071 
3072  if (len > 0)
3073  adapter->stats.multicast_pkts_rcvd++;
3074  } else if (word0 & ALCATEL_BROADCAST_PKT)
3075  adapter->stats.broadcast_pkts_rcvd++;
3076  else
3077  /* Not sure what this counter measures in
3078  * promiscuous mode. Perhaps we should check
3079  * the MAC address to see if it is directed
3080  * to us in promiscuous mode.
3081  */
3082  adapter->stats.unicast_pkts_rcvd++;
3083  }
3084 
3085  if (len > 0) {
3086  struct sk_buff *skb = NULL;
3087 
3088  /*rfd->len = len - 4; */
3089  rfd->len = len;
3090 
3091  skb = dev_alloc_skb(rfd->len + 2);
3092  if (!skb) {
3093  dev_err(&adapter->pdev->dev,
3094  "Couldn't alloc an SKB for Rx\n");
3095  return NULL;
3096  }
3097 
3098  adapter->net_stats.rx_bytes += rfd->len;
3099 
3100  /*
3101  * Note - ring_index for fbr[] array is reversed,
3102  * 1 for FBR0 etc
3103  */
3104  memcpy(skb_put(skb, rfd->len),
3105  rx_local->fbr[(ring_index == 0 ? 1 : 0)]->virt[buff_index],
3106  rfd->len);
3107 
3108  skb->dev = adapter->netdev;
3109  skb->protocol = eth_type_trans(skb, adapter->netdev);
3110  skb->ip_summed = CHECKSUM_NONE;
3111 
3112  netif_rx_ni(skb);
3113  } else {
3114  rfd->len = 0;
3115  }
3116 
3117  nic_return_rfd(adapter, rfd);
3118  return rfd;
3119 }
3120 
3127 static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
3128 {
3129  struct rfd *rfd = NULL;
3130  u32 count = 0;
3131  bool done = true;
3132 
3133  /* Process up to the available RFDs */
3134  while (count < NUM_PACKETS_HANDLED) {
3135  if (list_empty(&adapter->rx_ring.recv_list)) {
3136  WARN_ON(adapter->rx_ring.num_ready_recv != 0);
3137  done = false;
3138  break;
3139  }
3140 
3141  rfd = nic_rx_pkts(adapter);
3142 
3143  if (rfd == NULL)
3144  break;
3145 
3146  /* Do not receive any packets until a filter has been set.
3147  * Do not receive any packets until we have link.
3148  * If length is zero, return the RFD in order to advance the
3149  * Free buffer ring.
3150  */
3151  if (!adapter->packet_filter ||
3152  !netif_carrier_ok(adapter->netdev) ||
3153  rfd->len == 0)
3154  continue;
3155 
3156  /* Increment the number of packets we received */
3157  adapter->net_stats.rx_packets++;
3158 
3159  /* Set the status on the packet, either resources or success */
3160  if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) {
3161  dev_warn(&adapter->pdev->dev,
3162  "RFD's are running out\n");
3163  }
3164  count++;
3165  }
3166 
3167  if (count == NUM_PACKETS_HANDLED || !done) {
3168  adapter->rx_ring.unfinished_receives = true;
3169  writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3170  &adapter->regs->global.watchdog_timer);
3171  } else
3172  /* Watchdog timer will disable itself if appropriate. */
3173  adapter->rx_ring.unfinished_receives = false;
3174 }
3175 
3188 static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
3189 {
3190  int desc_size = 0;
3191  struct tx_ring *tx_ring = &adapter->tx_ring;
3192 
3193  /* Allocate memory for the TCBs (Transmit Control Blocks) */
3194  adapter->tx_ring.tcb_ring =
3195  kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
3196  if (!adapter->tx_ring.tcb_ring) {
3197  dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
3198  return -ENOMEM;
3199  }
3200 
3201  /* Allocate enough memory for the Tx descriptor ring, and allocate
3202  * some extra so that the ring can be aligned on a 4k boundary.
3203  */
3204  desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1;
3205  tx_ring->tx_desc_ring =
3206  (struct tx_desc *) dma_alloc_coherent(&adapter->pdev->dev,
3207  desc_size,
3208  &tx_ring->tx_desc_ring_pa,
3209  GFP_KERNEL);
3210  if (!adapter->tx_ring.tx_desc_ring) {
3211  dev_err(&adapter->pdev->dev,
3212  "Cannot alloc memory for Tx Ring\n");
3213  return -ENOMEM;
3214  }
3215 
3216  /* Save physical address
3217  *
3218  * NOTE: dma_alloc_coherent(), used above to alloc DMA regions,
3219  * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
3220  * are ever returned, make sure the high part is retrieved here before
3221  * storing the adjusted address.
3222  */
3223  /* Allocate memory for the Tx status block */
3224  tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
3225  sizeof(u32),
3226  &tx_ring->tx_status_pa,
3227  GFP_KERNEL);
3228  if (!adapter->tx_ring.tx_status_pa) {
3229  dev_err(&adapter->pdev->dev,
3230  "Cannot alloc memory for Tx status block\n");
3231  return -ENOMEM;
3232  }
3233  return 0;
3234 }
3235 
3242 static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
3243 {
3244  int desc_size = 0;
3245 
3246  if (adapter->tx_ring.tx_desc_ring) {
3247  /* Free memory relating to Tx rings here */
3248  desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX)
3249  + 4096 - 1;
3250  dma_free_coherent(&adapter->pdev->dev,
3251  desc_size,
3252  adapter->tx_ring.tx_desc_ring,
3253  adapter->tx_ring.tx_desc_ring_pa);
3254  adapter->tx_ring.tx_desc_ring = NULL;
3255  }
3256 
3257  /* Free memory for the Tx status block */
3258  if (adapter->tx_ring.tx_status) {
3259  dma_free_coherent(&adapter->pdev->dev,
3260  sizeof(u32),
3261  adapter->tx_ring.tx_status,
3262  adapter->tx_ring.tx_status_pa);
3263 
3264  adapter->tx_ring.tx_status = NULL;
3265  }
3266  /* Free the memory for the tcb structures */
3267  kfree(adapter->tx_ring.tcb_ring);
3268 }
3269 
3277 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
3278 {
3279  u32 i;
3280  struct tx_desc desc[24]; /* 24 x 16 byte */
3281  u32 frag = 0;
3282  u32 thiscopy, remainder;
3283  struct sk_buff *skb = tcb->skb;
3284  u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
3285  struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
3286  unsigned long flags;
3287  struct phy_device *phydev = adapter->phydev;
3288 
3289  /* Part of the optimizations of this send routine restrict us to
3290  * sending 24 fragments at a pass. In practice we should never see
3291  * more than 5 fragments.
3292  *
3293  * NOTE: The older version of this function (below) can handle any
3294  * number of fragments. If needed, we can call this function,
3295  * although it is less efficient.
3296  */
3297  if (nr_frags > 23)
3298  return -EIO;
3299 
3300  memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
3301 
3302  for (i = 0; i < nr_frags; i++) {
3303  /* If there is something in this element, lets get a
3304  * descriptor from the ring and get the necessary data
3305  */
3306  if (i == 0) {
3307  /* If the fragments are smaller than a standard MTU,
3308  * then map them to a single descriptor in the Tx
3309  * Desc ring. However, if they're larger, as is
3310  * possible with support for jumbo packets, then
3311  * split them each across 2 descriptors.
3312  *
3313  * This will work until we determine why the hardware
3314  * doesn't seem to like large fragments.
3315  */
3316  if ((skb->len - skb->data_len) <= 1514) {
3317  desc[frag].addr_hi = 0;
3318  /* Low 16bits are length, high is vlan and
3319  unused currently so zero */
3320  desc[frag].len_vlan =
3321  skb->len - skb->data_len;
3322 
3323  /* NOTE: Here, the dma_addr_t returned from
3324  * dma_map_single() is implicitly cast as a
3325  * u32. Although dma_addr_t can be
3326  * 64-bit, the address returned by
3327  * dma_map_single() is always 32-bit
3328  * addressable (as defined by the pci/dma
3329  * subsystem)
3330  */
3331  desc[frag++].addr_lo =
3332  dma_map_single(&adapter->pdev->dev,
3333  skb->data,
3334  skb->len -
3335  skb->data_len,
3336  DMA_TO_DEVICE);
3337  } else {
3338  desc[frag].addr_hi = 0;
3339  desc[frag].len_vlan =
3340  (skb->len - skb->data_len) / 2;
3341 
3342  /* NOTE: Here, the dma_addr_t returned from
3343  * dma_map_single() is implicitly cast as a
3344  * u32. Although dma_addr_t can be
3345  * 64-bit, the address returned by
3346  * dma_map_single() is always 32-bit
3347  * addressable (as defined by the pci/dma
3348  * subsystem)
3349  */
3350  desc[frag++].addr_lo =
3351  dma_map_single(&adapter->pdev->dev,
3352  skb->data,
3353  ((skb->len -
3354  skb->data_len) / 2),
3355  DMA_TO_DEVICE);
3356  desc[frag].addr_hi = 0;
3357 
3358  desc[frag].len_vlan =
3359  (skb->len - skb->data_len) / 2;
3360 
3361  /* NOTE: Here, the dma_addr_t returned from
3362  * dma_map_single() is implicitly cast as a
3363  * u32. Although dma_addr_t can be
3364  * 64-bit, the address returned by
3365  * dma_map_single() is always 32-bit
3366  * addressable (as defined by the pci/dma
3367  * subsystem)
3368  */
3369  desc[frag++].addr_lo =
3370  dma_map_single(&adapter->pdev->dev,
3371  skb->data +
3372  ((skb->len -
3373  skb->data_len) / 2),
3374  ((skb->len -
3375  skb->data_len) / 2),
3376  DMA_TO_DEVICE);
3377  }
3378  } else {
3379  desc[frag].addr_hi = 0;
3380  desc[frag].len_vlan =
3381  frags[i - 1].size;
3382 
3383  /* NOTE: Here, the dma_addr_t returned from
3384  * dma_map_page() is implicitly cast as a u32.
3385  * Although dma_addr_t can be 64-bit, the address
3386  * returned by dma_map_page() is always 32-bit
3387  * addressable (as defined by the pci/dma subsystem)
3388  */
3389  desc[frag++].addr_lo = skb_frag_dma_map(
3390  &adapter->pdev->dev,
3391  &frags[i - 1],
3392  0,
3393  frags[i - 1].size,
3394  DMA_TO_DEVICE);
3395  }
3396  }
3397 
3398  if (phydev && phydev->speed == SPEED_1000) {
3399  if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
3400  /* Last element & Interrupt flag */
3401  desc[frag - 1].flags = 0x5;
3402  adapter->tx_ring.since_irq = 0;
3403  } else { /* Last element */
3404  desc[frag - 1].flags = 0x1;
3405  }
3406  } else
3407  desc[frag - 1].flags = 0x5;
3408 
3409  desc[0].flags |= 2; /* First element flag */
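/* Reading the flag values used above (as implied by this code rather
 * than a datasheet): bit 0 marks the last descriptor of a packet,
 * bit 1 the first, and bit 2 requests an interrupt on completion. So
 * 0x5 = last + interrupt, 0x1 = last only, and the |= 2 here tags the
 * first descriptor; a single-fragment packet ends up with both the
 * first and last bits set in desc[0].
 */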
3410 
3411  tcb->index_start = adapter->tx_ring.send_idx;
3412  tcb->stale = 0;
3413 
3414  spin_lock_irqsave(&adapter->send_hw_lock, flags);
3415 
3416  thiscopy = NUM_DESC_PER_RING_TX -
3417  INDEX10(adapter->tx_ring.send_idx);
3418 
3419  if (thiscopy >= frag) {
3420  remainder = 0;
3421  thiscopy = frag;
3422  } else {
3423  remainder = frag - thiscopy;
3424  }
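/* The copy below may have to wrap: descriptors from send_idx to the
 * end of the ring are written first (thiscopy), and any remainder
 * continues at ring entry 0. For example, with a 512-entry ring,
 * INDEX10(send_idx) = 510 and frag = 4 gives thiscopy = 2 and
 * remainder = 2, so two descriptors go to slots 510-511 and two to
 * slots 0-1.
 */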
3425 
3426  memcpy(adapter->tx_ring.tx_desc_ring +
3427  INDEX10(adapter->tx_ring.send_idx), desc,
3428  sizeof(struct tx_desc) * thiscopy);
3429 
3430  add_10bit(&adapter->tx_ring.send_idx, thiscopy);
3431 
3432  if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
3433  INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
3434  adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
3435  adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
3436  }
3437 
3438  if (remainder) {
3439  memcpy(adapter->tx_ring.tx_desc_ring,
3440  desc + thiscopy,
3441  sizeof(struct tx_desc) * remainder);
3442 
3443  add_10bit(&adapter->tx_ring.send_idx, remainder);
3444  }
3445 
3446  if (INDEX10(adapter->tx_ring.send_idx) == 0) {
3447  if (adapter->tx_ring.send_idx)
3448  tcb->index = NUM_DESC_PER_RING_TX - 1;
3449  else
3450  tcb->index = ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
3451  } else
3452  tcb->index = adapter->tx_ring.send_idx - 1;
3453 
3454  spin_lock(&adapter->tcb_send_qlock);
3455 
3456  if (adapter->tx_ring.send_tail)
3457  adapter->tx_ring.send_tail->next = tcb;
3458  else
3459  adapter->tx_ring.send_head = tcb;
3460 
3461  adapter->tx_ring.send_tail = tcb;
3462 
3463  WARN_ON(tcb->next != NULL);
3464 
3465  adapter->tx_ring.used++;
3466 
3467  spin_unlock(&adapter->tcb_send_qlock);
3468 
3469  /* Write the new write pointer back to the device. */
3470  writel(adapter->tx_ring.send_idx,
3471  &adapter->regs->txdma.service_request);
3472 
3473  /* For Gig only, we use Tx Interrupt coalescing. Enable the software
3474  * timer to wake us up if this packet isn't followed by N more.
3475  */
3476  if (phydev && phydev->speed == SPEED_1000) {
3477  writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
3478  &adapter->regs->global.watchdog_timer);
3479  }
3480  spin_unlock_irqrestore(&adapter->send_hw_lock, flags);
3481 
3482  return 0;
3483 }
3484 
3494 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
3495 {
3496  int status;
3497  struct tcb *tcb = NULL;
3498  u16 *shbufva;
3499  unsigned long flags;
3500 
3501  /* All packets must have at least a MAC address and a protocol type */
3502  if (skb->len < ETH_HLEN)
3503  return -EIO;
3504 
3505  /* Get a TCB for this packet */
3506  spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3507 
3508  tcb = adapter->tx_ring.tcb_qhead;
3509 
3510  if (tcb == NULL) {
3511  spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3512  return -ENOMEM;
3513  }
3514 
3515  adapter->tx_ring.tcb_qhead = tcb->next;
3516 
3517  if (adapter->tx_ring.tcb_qhead == NULL)
3518  adapter->tx_ring.tcb_qtail = NULL;
3519 
3520  spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3521 
3522  tcb->skb = skb;
3523 
3524  if (skb->data != NULL && skb->len - skb->data_len >= 6) {
3525  shbufva = (u16 *) skb->data;
3526 
3527  if ((shbufva[0] == 0xffff) &&
3528  (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
3529  tcb->flags |= fMP_DEST_BROAD;
3530  } else if ((shbufva[0] & 0x3) == 0x0001) {
3531  tcb->flags |= fMP_DEST_MULTI;
3532  }
3533  }
3534 
3535  tcb->next = NULL;
3536 
3537  /* Call the NIC specific send handler. */
3538  status = nic_send_packet(adapter, tcb);
3539 
3540  if (status != 0) {
3541  spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3542 
3543  if (adapter->tx_ring.tcb_qtail)
3544  adapter->tx_ring.tcb_qtail->next = tcb;
3545  else
3546  /* Apparently ready Q is empty. */
3547  adapter->tx_ring.tcb_qhead = tcb;
3548 
3549  adapter->tx_ring.tcb_qtail = tcb;
3550  spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3551  return status;
3552  }
3553  WARN_ON(adapter->tx_ring.used > NUM_TCB);
3554  return 0;
3555 }
3556 
3564 static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
3565 {
3566  int status = 0;
3567  struct et131x_adapter *adapter = netdev_priv(netdev);
3568 
3569  /* Send these packets
3570  *
3571  * NOTE: The Linux Tx entry point is only given one packet at a time
3572  * to Tx, so the PacketCount and its array make no sense here
3573  */
3574 
3575  /* TCB is not available */
3576  if (adapter->tx_ring.used >= NUM_TCB) {
3577  /* NOTE: If there's an error on send, no need to queue the
3578  * packet under Linux; if we just send an error up to the
3579  * netif layer, it will resend the skb to us.
3580  */
3581  status = -ENOMEM;
3582  } else {
3583  /* We need to see if the link is up; if it's not, make the
3584  * netif layer think we're good and drop the packet
3585  */
3586  if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
3587  !netif_carrier_ok(netdev)) {
3588  dev_kfree_skb_any(skb);
3589  skb = NULL;
3590 
3591  adapter->net_stats.tx_dropped++;
3592  } else {
3593  status = send_packet(skb, adapter);
3594  if (status != 0 && status != -ENOMEM) {
3595  /* On any other error, make netif think we're
3596  * OK and drop the packet
3597  */
3598  dev_kfree_skb_any(skb);
3599  skb = NULL;
3600  adapter->net_stats.tx_dropped++;
3601  }
3602  }
3603  }
3604  return status;
3605 }
3606 
3615 static inline void free_send_packet(struct et131x_adapter *adapter,
3616  struct tcb *tcb)
3617 {
3618  unsigned long flags;
3619  struct tx_desc *desc = NULL;
3620  struct net_device_stats *stats = &adapter->net_stats;
3621 
3622  if (tcb->flags & fMP_DEST_BROAD)
3623  atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
3624  else if (tcb->flags & fMP_DEST_MULTI)
3625  atomic_inc(&adapter->stats.multicast_pkts_xmtd);
3626  else
3627  atomic_inc(&adapter->stats.unicast_pkts_xmtd);
3628 
3629  if (tcb->skb) {
3630  stats->tx_bytes += tcb->skb->len;
3631 
3632  /* Iterate through the TX descriptors on the ring
3633  * corresponding to this packet and unmap the fragments
3634  * they point to
3635  */
3636  do {
3637  desc = (struct tx_desc *)
3638  (adapter->tx_ring.tx_desc_ring +
3639  INDEX10(tcb->index_start));
3640 
3641  dma_unmap_single(&adapter->pdev->dev,
3642  desc->addr_lo,
3643  desc->len_vlan, DMA_TO_DEVICE);
3644 
3645  add_10bit(&tcb->index_start, 1);
3646  if (INDEX10(tcb->index_start) >=
3647  NUM_DESC_PER_RING_TX) {
3648  tcb->index_start &= ~ET_DMA10_MASK;
3649  tcb->index_start ^= ET_DMA10_WRAP;
3650  }
3651  } while (desc != (adapter->tx_ring.tx_desc_ring +
3652  INDEX10(tcb->index)));
3653 
3654  dev_kfree_skb_any(tcb->skb);
3655  }
3656 
3657  memset(tcb, 0, sizeof(struct tcb));
3658 
3659  /* Add the TCB to the Ready Q */
3660  spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
3661 
3662  adapter->net_stats.tx_packets++;
3663 
3664  if (adapter->tx_ring.tcb_qtail)
3665  adapter->tx_ring.tcb_qtail->next = tcb;
3666  else
3667  /* Apparently ready Q is empty. */
3668  adapter->tx_ring.tcb_qhead = tcb;
3669 
3670  adapter->tx_ring.tcb_qtail = tcb;
3671 
3672  spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
3673  WARN_ON(adapter->tx_ring.used < 0);
3674 }
3675 
3682 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
3683 {
3684  struct tcb *tcb;
3685  unsigned long flags;
3686  u32 freed = 0;
3687 
3688  /* Any packets being sent? Check the first TCB on the send list */
3689  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3690 
3691  tcb = adapter->tx_ring.send_head;
3692 
3693  while (tcb != NULL && freed < NUM_TCB) {
3694  struct tcb *next = tcb->next;
3695 
3696  adapter->tx_ring.send_head = next;
3697 
3698  if (next == NULL)
3699  adapter->tx_ring.send_tail = NULL;
3700 
3701  adapter->tx_ring.used--;
3702 
3703  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3704 
3705  freed++;
3706  free_send_packet(adapter, tcb);
3707 
3708  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3709 
3710  tcb = adapter->tx_ring.send_head;
3711  }
3712 
3713  WARN_ON(freed == NUM_TCB);
3714 
3715  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3716 
3717  adapter->tx_ring.used = 0;
3718 }
3719 
3729 static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
3730 {
3731  unsigned long flags;
3732  u32 serviced;
3733  struct tcb *tcb;
3734  u32 index;
3735 
3736  serviced = readl(&adapter->regs->txdma.new_service_complete);
3737  index = INDEX10(serviced);
3738 
3739  /* Has the ring wrapped? Process any descriptors that do not have
3740  * the same "wrap" indicator as the current completion indicator
3741  */
3742  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3743 
3744  tcb = adapter->tx_ring.send_head;
3745 
3746  while (tcb &&
3747  ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
3748  index < INDEX10(tcb->index)) {
3749  adapter->tx_ring.used--;
3750  adapter->tx_ring.send_head = tcb->next;
3751  if (tcb->next == NULL)
3752  adapter->tx_ring.send_tail = NULL;
3753 
3754  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3755  free_send_packet(adapter, tcb);
3756  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3757 
3758  /* Goto the next packet */
3759  tcb = adapter->tx_ring.send_head;
3760  }
3761  while (tcb &&
3762  !((serviced ^ tcb->index) & ET_DMA10_WRAP)
3763  && index > (tcb->index & ET_DMA10_MASK)) {
3764  adapter->tx_ring.used--;
3765  adapter->tx_ring.send_head = tcb->next;
3766  if (tcb->next == NULL)
3767  adapter->tx_ring.send_tail = NULL;
3768 
3769  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3770  free_send_packet(adapter, tcb);
3771  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3772 
3773  /* Goto the next packet */
3774  tcb = adapter->tx_ring.send_head;
3775  }
3776 
3777  /* Wake up the queue when we hit a low-water mark */
3778  if (adapter->tx_ring.used <= NUM_TCB / 3)
3779  netif_wake_queue(adapter->netdev);
3780 
3781  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3782 }
3783 
3784 static int et131x_get_settings(struct net_device *netdev,
3785  struct ethtool_cmd *cmd)
3786 {
3787  struct et131x_adapter *adapter = netdev_priv(netdev);
3788 
3789  return phy_ethtool_gset(adapter->phydev, cmd);
3790 }
3791 
3792 static int et131x_set_settings(struct net_device *netdev,
3793  struct ethtool_cmd *cmd)
3794 {
3795  struct et131x_adapter *adapter = netdev_priv(netdev);
3796 
3797  return phy_ethtool_sset(adapter->phydev, cmd);
3798 }
3799 
3800 static int et131x_get_regs_len(struct net_device *netdev)
3801 {
3802 #define ET131X_REGS_LEN 256
3803  return ET131X_REGS_LEN * sizeof(u32);
3804 }
3805 
3806 static void et131x_get_regs(struct net_device *netdev,
3807  struct ethtool_regs *regs, void *regs_data)
3808 {
3809  struct et131x_adapter *adapter = netdev_priv(netdev);
3810  struct address_map __iomem *aregs = adapter->regs;
3811  u32 *regs_buff = regs_data;
3812  u32 num = 0;
3813 
3814  memset(regs_data, 0, et131x_get_regs_len(netdev));
3815 
3816  regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
3817  adapter->pdev->device;
3818 
3819  /* PHY regs */
3820  et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
3821  et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
3822  et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
3823  et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
3824  et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
3825  et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
3826  et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
3827  /* Autoneg next page transmit reg */
3828  et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
3829  /* Link partner next page reg */
3830  et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
3831  et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
3832  et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
3833  et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
3834  et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
3835  et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
3836  et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3837  (u16 *)&regs_buff[num++]);
3838  et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
3839  (u16 *)&regs_buff[num++]);
3840  et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
3841  (u16 *)&regs_buff[num++]);
3842  et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
3843  (u16 *)&regs_buff[num++]);
3844  et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
3845  et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
3846  et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
3847  et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
3848  (u16 *)&regs_buff[num++]);
3849  et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
3850  et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
3851  et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);
3852 
3853  /* Global regs */
3854  regs_buff[num++] = readl(&aregs->global.txq_start_addr);
3855  regs_buff[num++] = readl(&aregs->global.txq_end_addr);
3856  regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
3857  regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
3858  regs_buff[num++] = readl(&aregs->global.pm_csr);
3859  regs_buff[num++] = adapter->stats.interrupt_status;
3860  regs_buff[num++] = readl(&aregs->global.int_mask);
3861  regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
3862  regs_buff[num++] = readl(&aregs->global.int_status_alias);
3863  regs_buff[num++] = readl(&aregs->global.sw_reset);
3864  regs_buff[num++] = readl(&aregs->global.slv_timer);
3865  regs_buff[num++] = readl(&aregs->global.msi_config);
3866  regs_buff[num++] = readl(&aregs->global.loopback);
3867  regs_buff[num++] = readl(&aregs->global.watchdog_timer);
3868 
3869  /* TXDMA regs */
3870  regs_buff[num++] = readl(&aregs->txdma.csr);
3871  regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
3872  regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
3873  regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
3874  regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
3875  regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
3876  regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
3877  regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
3878  regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
3879  regs_buff[num++] = readl(&aregs->txdma.service_request);
3880  regs_buff[num++] = readl(&aregs->txdma.service_complete);
3881  regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
3882  regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
3883  regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
3884  regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
3885  regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
3886  regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
3887  regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
3888  regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
3889  regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
3890  regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
3891  regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
3892  regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
3893  regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
3894  regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
3895  regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
3896 
3897  /* RXDMA regs */
3898  regs_buff[num++] = readl(&aregs->rxdma.csr);
3899  regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
3900  regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
3901  regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
3902  regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
3903  regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
3904  regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
3905  regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
3906  regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
3907  regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
3908  regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
3909  regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
3910  regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
3911  regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
3912  regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
3913  regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
3914  regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
3915  regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
3916  regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
3917  regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
3918  regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
3919  regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
3920  regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
3921  regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
3922  regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
3923  regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
3924  regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
3925  regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
3926  regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
3927 }
3928 
3929 #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */
3930 static void et131x_get_drvinfo(struct net_device *netdev,
3931  struct ethtool_drvinfo *info)
3932 {
3933  struct et131x_adapter *adapter = netdev_priv(netdev);
3934 
3935  strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN);
3936  strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN);
3937  strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN);
3938 }
3939 
3940 static struct ethtool_ops et131x_ethtool_ops = {
3941  .get_settings = et131x_get_settings,
3942  .set_settings = et131x_set_settings,
3943  .get_drvinfo = et131x_get_drvinfo,
3944  .get_regs_len = et131x_get_regs_len,
3945  .get_regs = et131x_get_regs,
3946  .get_link = ethtool_op_get_link,
3947 };
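3948 /**
3949  * et131x_hwaddr_init - set up the MAC Address on the ET1310
3950  * @adapter: pointer to our private adapter structure
3951  */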
3952 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
3953 {
3954  /* If we have our default MAC from init and no MAC address from
3955  * the EEPROM, then we need to generate the last octet and set it
3956  * on the device
3957  */
3958  if (is_zero_ether_addr(adapter->rom_addr)) {
3959  /*
3960  * We need to randomly generate the last octet so we
3961  * decrease our chances of setting the MAC address to
3962  * the same as another of our cards in the system
3963  */
3964  get_random_bytes(&adapter->addr[5], 1);
3965  /*
3966  * We have the default value in the register we are
3967  * working with so we need to copy the current
3968  * address into the permanent address
3969  */
3970  memcpy(adapter->rom_addr,
3971  adapter->addr, ETH_ALEN);
3972  } else {
3973  /* We do not have an override address, so set the
3974  * current address to the permanent address and add
3975  * it to the device
3976  */
3977  memcpy(adapter->addr,
3978  adapter->rom_addr, ETH_ALEN);
3979  }
3980 }
3981 
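3982 /**
3983  * et131x_pci_init - initial PCI setup
3984  * @adapter: pointer to our private adapter structure
3985  * @pdev: our PCI device
3986  *
3987  * Perform the initial setup of PCI registers and if possible initialise
3988  * the MAC address. At this point the I/O registers have yet to be mapped
3989  */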
3990 static int et131x_pci_init(struct et131x_adapter *adapter,
3991  struct pci_dev *pdev)
3992 {
3993  u16 max_payload;
3994  int i, rc;
3995 
3996  rc = et131x_init_eeprom(adapter);
3997  if (rc < 0)
3998  goto out;
3999 
4000  if (!pci_is_pcie(pdev)) {
4001  dev_err(&pdev->dev, "Missing PCIe capabilities\n");
4002  goto err_out;
4003  }
4004 
4005  /* Let's set up the PORT LOGIC Register. First we need to know what
4006  * the max_payload_size is
4007  */
4008  if (pcie_capability_read_word(pdev, PCI_EXP_DEVCAP, &max_payload)) {
4009  dev_err(&pdev->dev,
4010  "Could not read PCI config space for Max Payload Size\n");
4011  goto err_out;
4012  }
4013 
4014  /* Program the Ack/Nak latency and replay timers */
4015  max_payload &= 0x07;
4016 
4017  if (max_payload < 2) {
4018  static const u16 acknak[2] = { 0x76, 0xD0 };
4019  static const u16 replay[2] = { 0x1E0, 0x2ED };
4020 
4021  if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
4022  acknak[max_payload])) {
4023  dev_err(&pdev->dev,
4024  "Could not write PCI config space for ACK/NAK\n");
4025  goto err_out;
4026  }
4027  if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
4028  replay[max_payload])) {
4029  dev_err(&pdev->dev,
4030  "Could not write PCI config space for Replay Timer\n");
4031  goto err_out;
4032  }
4033  }
4034 
4035  /* L0s and L1 latency timers. We are using the default values:
4036  * 001 for L0s and 010 for L1
4037  */
4038  if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
4039  dev_err(&pdev->dev,
4040  "Could not write PCI config space for Latency Timers\n");
4041  goto err_out;
4042  }
4043 
4044  /* Change the max read size to 2k */
4045  if (pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
4046  PCI_EXP_DEVCTL_READRQ, 0x4 << 12)) {
4047  dev_err(&pdev->dev,
4048  "Couldn't change PCI config space for Max read size\n");
4049  goto err_out;
4050  }
4051 
4052  /* Get MAC address from config space if an eeprom exists, otherwise
4053  * the MAC address there will not be valid
4054  */
4055  if (!adapter->has_eeprom) {
4056  et131x_hwaddr_init(adapter);
4057  return 0;
4058  }
4059 
4060  for (i = 0; i < ETH_ALEN; i++) {
4061  if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
4062  adapter->rom_addr + i)) {
4063  dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
4064  goto err_out;
4065  }
4066  }
4067  memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
4068 out:
4069  return rc;
4070 err_out:
4071  rc = -EIO;
4072  goto out;
4073 }
4074 
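4075 /**
4076  * et131x_error_timer_handler
4077  * @data: timer-specific variable; here a pointer to our adapter structure
4078  *
4079  * The routine called when the error timer expires, to track the number of
4080  * recurring errors.
4081  */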
4082 static void et131x_error_timer_handler(unsigned long data)
4083 {
4084  struct et131x_adapter *adapter = (struct et131x_adapter *) data;
4085  struct phy_device *phydev = adapter->phydev;
4086 
4087  if (et1310_in_phy_coma(adapter)) {
4088  /* Bring the device immediately out of coma, to
4089  * prevent it from sleeping indefinitely; this
4090  * mechanism could be improved! */
4091  et1310_disable_phy_coma(adapter);
4092  adapter->boot_coma = 20;
4093  } else {
4094  et1310_update_macstat_host_counters(adapter);
4095  }
4096 
4097  if (!phydev->link && adapter->boot_coma < 11)
4098  adapter->boot_coma++;
4099 
4100  if (adapter->boot_coma == 10) {
4101  if (!phydev->link) {
4102  if (!et1310_in_phy_coma(adapter)) {
4103  /* NOTE - This was originally a 'sync with
4104  * interrupt'. How to do that under Linux?
4105  */
4106  et131x_enable_interrupts(adapter);
4107  et1310_enable_phy_coma(adapter);
4108  }
4109  }
4110  }
4111 
4112  /* This is a periodic timer, so reschedule */
4113  mod_timer(&adapter->error_timer, jiffies +
4114  TX_ERROR_PERIOD * HZ / 1000);
4115 }
4116 
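4117 /**
4118  * et131x_adapter_memory_alloc
4119  * @adapter: pointer to our private adapter structure
4120  *
4121  * Returns 0 on success, errno on failure (as defined in errno.h).
4122  *
4123  * Allocate all the memory blocks for send, receive and others.
4124  */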
4125 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
4126 {
4127  int status;
4128 
4129  /* Allocate memory for the Tx Ring */
4130  status = et131x_tx_dma_memory_alloc(adapter);
4131  if (status != 0) {
4132  dev_err(&adapter->pdev->dev,
4133  "et131x_tx_dma_memory_alloc FAILED\n");
4134  return status;
4135  }
4136  /* Receive buffer memory allocation */
4137  status = et131x_rx_dma_memory_alloc(adapter);
4138  if (status != 0) {
4139  dev_err(&adapter->pdev->dev,
4140  "et131x_rx_dma_memory_alloc FAILED\n");
4141  et131x_tx_dma_memory_free(adapter);
4142  return status;
4143  }
4144 
4145  /* Init receive data structures */
4146  status = et131x_init_recv(adapter);
4147  if (status != 0) {
4148  dev_err(&adapter->pdev->dev,
4149  "et131x_init_recv FAILED\n");
4150  et131x_tx_dma_memory_free(adapter);
4151  et131x_rx_dma_memory_free(adapter);
4152  }
4153  return status;
4154 }
4155 
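4156 /**
4157  * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
4158  * @adapter: pointer to our private adapter structure
4159  */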
4160 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
4161 {
4162  /* Free DMA memory */
4163  et131x_tx_dma_memory_free(adapter);
4164  et131x_rx_dma_memory_free(adapter);
4165 }
4166 
4167 static void et131x_adjust_link(struct net_device *netdev)
4168 {
4169  struct et131x_adapter *adapter = netdev_priv(netdev);
4170  struct phy_device *phydev = adapter->phydev;
4171 
4172  if (netif_carrier_ok(netdev)) {
4173  adapter->boot_coma = 20;
4174 
4175  if (phydev && phydev->speed == SPEED_10) {
4176  /*
4177  * NOTE - Is there a way to query this without
4178  * TruePHY?
4179  * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
4180  * EMI_TRUEPHY_A13O) {
4181  */
4182  u16 register18;
4183 
4184  et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4185  &register18);
4186  et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4187  register18 | 0x4);
4188  et131x_mii_write(adapter, PHY_INDEX_REG,
4189  register18 | 0x8402);
4190  et131x_mii_write(adapter, PHY_DATA_REG,
4191  register18 | 511);
4192  et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4193  register18);
4194  }
4195 
4196  et1310_config_flow_control(adapter);
4197 
4198  if (phydev && phydev->speed == SPEED_1000 &&
4199  adapter->registry_jumbo_packet > 2048) {
4200  u16 reg;
4201 
4202  et131x_mii_read(adapter, PHY_CONFIG, &reg);
4203  reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
4204  reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
4205  et131x_mii_write(adapter, PHY_CONFIG, reg);
4206  }
4207 
4208  et131x_set_rx_dma_timer(adapter);
4209  et1310_config_mac_regs2(adapter);
4210  }
4211 
4212  if (phydev && phydev->link != adapter->link) {
4213  /*
4214  * Check to see if we are in coma mode and if
4215  * so, disable it because we will not be able
4216  * to read PHY values until we are out.
4217  */
4218  if (et1310_in_phy_coma(adapter))
4219  et1310_disable_phy_coma(adapter);
4220 
4221  if (phydev->link) {
4222  adapter->boot_coma = 20;
4223  } else {
4224  dev_warn(&adapter->pdev->dev,
4225  "Link down - cable problem?\n");
4226  adapter->boot_coma = 0;
4227 
4228  if (phydev->speed == SPEED_10) {
4229  /* NOTE - Is there a way to query this without
4230  * TruePHY?
4231  * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
4232  * EMI_TRUEPHY_A13O)
4233  */
4234  u16 register18;
4235 
4236  et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
4237  &register18);
4238  et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4239  register18 | 0x4);
4240  et131x_mii_write(adapter, PHY_INDEX_REG,
4241  register18 | 0x8402);
4242  et131x_mii_write(adapter, PHY_DATA_REG,
4243  register18 | 511);
4244  et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
4245  register18);
4246  }
4247 
4248  /* Free the packets being actively sent & stopped */
4249  et131x_free_busy_send_packets(adapter);
4250 
4251  /* Re-initialize the send structures */
4252  et131x_init_send(adapter);
4253 
4254  /*
4255  * Bring the device back to the state it was during
4256  * init prior to autonegotiation being complete. This
4257  * way, when we get the auto-neg complete interrupt,
4258  * we can complete init by calling config_mac_regs2.
4259  */
4260  et131x_soft_reset(adapter);
4261 
4262  /* Setup ET1310 as per the documentation */
4263  et131x_adapter_setup(adapter);
4264 
4265  /* perform reset of tx/rx */
4266  et131x_disable_txrx(netdev);
4267  et131x_enable_txrx(netdev);
4268  }
4269 
4270  adapter->link = phydev->link;
4271 
4272  phy_print_status(phydev);
4273  }
4274 }
4275 
4276 static int et131x_mii_probe(struct net_device *netdev)
4277 {
4278  struct et131x_adapter *adapter = netdev_priv(netdev);
4279  struct phy_device *phydev = NULL;
4280 
4281  phydev = phy_find_first(adapter->mii_bus);
4282  if (!phydev) {
4283  dev_err(&adapter->pdev->dev, "no PHY found\n");
4284  return -ENODEV;
4285  }
4286 
4287  phydev = phy_connect(netdev, dev_name(&phydev->dev),
4288  &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);
4289 
4290  if (IS_ERR(phydev)) {
4291  dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
4292  return PTR_ERR(phydev);
4293  }
4294 
4295  phydev->supported &= (SUPPORTED_10baseT_Half
4296  | SUPPORTED_10baseT_Full
4297  | SUPPORTED_100baseT_Half
4298  | SUPPORTED_100baseT_Full
4299  | SUPPORTED_Autoneg
4300  | SUPPORTED_MII
4301  | SUPPORTED_TP);
4302 
4303  if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
4304  phydev->supported |= SUPPORTED_1000baseT_Full;
4305 
4306  phydev->advertising = phydev->supported;
4307  adapter->phydev = phydev;
4308 
4309  dev_info(&adapter->pdev->dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
4310  phydev->drv->name, dev_name(&phydev->dev));
4311 
4312  return 0;
4313 }
4314 
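4315 /**
4316  * et131x_adapter_init
4317  * @netdev: pointer to the net_device struct
4318  * @pdev: pointer to the PCI device
4319  *
4320  * Initialize the data structures for the et131x_adapter object and link
4321  * them together with the platform-provided device structures.
4322  */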
4323 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
4324  struct pci_dev *pdev)
4325 {
4326  static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
4327 
4328  struct et131x_adapter *adapter;
4329 
4330  /* Allocate private adapter struct and copy in relevant information */
4331  adapter = netdev_priv(netdev);
4332  adapter->pdev = pci_dev_get(pdev);
4333  adapter->netdev = netdev;
4334 
4335  /* Initialize spinlocks here */
4336  spin_lock_init(&adapter->lock);
4337  spin_lock_init(&adapter->tcb_send_qlock);
4338  spin_lock_init(&adapter->tcb_ready_qlock);
4339  spin_lock_init(&adapter->send_hw_lock);
4340  spin_lock_init(&adapter->rcv_lock);
4341  spin_lock_init(&adapter->rcv_pend_lock);
4342  spin_lock_init(&adapter->fbr_lock);
4343  spin_lock_init(&adapter->phy_lock);
4344 
4345  adapter->registry_jumbo_packet = 1514; /* 1514-9216 */
4346 
4347  /* Set the MAC address to a default */
4348  memcpy(adapter->addr, default_mac, ETH_ALEN);
4349 
4350  return adapter;
4351 }
4352 
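4353 /**
4354  * et131x_pci_remove
4355  * @pdev: a pointer to the device's pci_dev structure
4356  *
4357  * Registered in the pci_driver structure, this function is called when the
4358  * PCI subsystem detects that a PCI device which matches the information
4359  * contained in the pci_device_id table has been removed.
4360  */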
4361 static void __devexit et131x_pci_remove(struct pci_dev *pdev)
4362 {
4363  struct net_device *netdev = pci_get_drvdata(pdev);
4364  struct et131x_adapter *adapter = netdev_priv(netdev);
4365 
4366  unregister_netdev(netdev);
4367  phy_disconnect(adapter->phydev);
4368  mdiobus_unregister(adapter->mii_bus);
4369  kfree(adapter->mii_bus->irq);
4370  mdiobus_free(adapter->mii_bus);
4371 
4372  et131x_adapter_memory_free(adapter);
4373  iounmap(adapter->regs);
4374  pci_dev_put(pdev);
4375 
4376  free_netdev(netdev);
4377  pci_release_regions(pdev);
4378  pci_disable_device(pdev);
4379 }
4380 
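4381 /**
4382  * et131x_up - Bring up a device for use.
4383  * @netdev: device to be opened
4384  */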
4385 static void et131x_up(struct net_device *netdev)
4386 {
4387  struct et131x_adapter *adapter = netdev_priv(netdev);
4388 
4389  et131x_enable_txrx(netdev);
4390  phy_start(adapter->phydev);
4391 }
4392 
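4393 /**
4394  * et131x_down - Bring down the device
4395  * @netdev: device to be brought down
4396  */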
4397 static void et131x_down(struct net_device *netdev)
4398 {
4399  struct et131x_adapter *adapter = netdev_priv(netdev);
4400 
4401  /* Save the timestamp for the TX watchdog, prevent a timeout */
4402  netdev->trans_start = jiffies;
4403 
4404  phy_stop(adapter->phydev);
4405  et131x_disable_txrx(netdev);
4406 }
4407 
4408 #ifdef CONFIG_PM_SLEEP
4409 static int et131x_suspend(struct device *dev)
4410 {
4411  struct pci_dev *pdev = to_pci_dev(dev);
4412  struct net_device *netdev = pci_get_drvdata(pdev);
4413 
4414  if (netif_running(netdev)) {
4415  netif_device_detach(netdev);
4416  et131x_down(netdev);
4417  pci_save_state(pdev);
4418  }
4419 
4420  return 0;
4421 }
4422 
4423 static int et131x_resume(struct device *dev)
4424 {
4425  struct pci_dev *pdev = to_pci_dev(dev);
4426  struct net_device *netdev = pci_get_drvdata(pdev);
4427 
4428  if (netif_running(netdev)) {
4429  pci_restore_state(pdev);
4430  et131x_up(netdev);
4431  netif_device_attach(netdev);
4432  }
4433 
4434  return 0;
4435 }
4436 
4437 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
4438 #define ET131X_PM_OPS (&et131x_pm_ops)
4439 #else
4440 #define ET131X_PM_OPS NULL
4441 #endif
4442 
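4443 /**
4444  * et131x_isr - The Interrupt Service Routine for the driver.
4445  * @irq: the IRQ on which the interrupt was received.
4446  * @dev_id: device-specific info (here a pointer to a net_device struct)
4447  *
4448  * Returns a value indicating if the interrupt was handled.
4449  */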
4450 static irqreturn_t et131x_isr(int irq, void *dev_id)
4451 {
4452  bool handled = true;
4453  struct net_device *netdev = (struct net_device *)dev_id;
4454  struct et131x_adapter *adapter = NULL;
4455  u32 status;
4456 
4457  if (!netif_device_present(netdev)) {
4458  handled = false;
4459  goto out;
4460  }
4461 
4462  adapter = netdev_priv(netdev);
4463 
4464  /* If the adapter is in low power state, then it should not
4465  * recognize any interrupt
4466  */
4467 
4468  /* Disable Device Interrupts */
4469  et131x_disable_interrupts(adapter);
4470 
4471  /* Get a copy of the value in the interrupt status register
4472  * so we can process the interrupting section
4473  */
4474  status = readl(&adapter->regs->global.int_status);
4475 
4476  if (adapter->flowcontrol == FLOW_TXONLY ||
4477  adapter->flowcontrol == FLOW_BOTH) {
4478  status &= ~INT_MASK_ENABLE;
4479  } else {
4480  status &= ~INT_MASK_ENABLE_NO_FLOW;
4481  }
4482 
4483  /* Make sure this is our interrupt */
4484  if (!status) {
4485  handled = false;
4486  et131x_enable_interrupts(adapter);
4487  goto out;
4488  }
4489 
4490  /* This is our interrupt, so process accordingly */
4491 
4492  if (status & ET_INTR_WATCHDOG) {
4493  struct tcb *tcb = adapter->tx_ring.send_head;
4494 
4495  if (tcb)
4496  if (++tcb->stale > 1)
4497  status |= ET_INTR_TXDMA_ISR;
4498 
4499  if (adapter->rx_ring.unfinished_receives)
4500  status |= ET_INTR_RXDMA_XFR_DONE;
4501  else if (tcb == NULL)
4502  writel(0, &adapter->regs->global.watchdog_timer);
4503 
4504  status &= ~ET_INTR_WATCHDOG;
4505  }
4506 
4507  if (status == 0) {
4508  /* This interrupt has in some way been "handled" by
4509  * the ISR. Either it was a spurious Rx interrupt, or
4510  * it was a Tx interrupt that has been filtered by
4511  * the ISR.
4512  */
4513  et131x_enable_interrupts(adapter);
4514  goto out;
4515  }
4516 
4517  /* We need to save the interrupt status value for use in our
4518  * DPC. We will clear the software copy of that in that
4519  * routine.
4520  */
4521  adapter->stats.interrupt_status = status;
4522 
4523  /* Schedule the ISR handler as a bottom-half task in the
4524  * kernel's tq_immediate queue, and mark the queue for
4525  * execution
4526  */
4527  schedule_work(&adapter->task);
4528 out:
4529  return IRQ_RETVAL(handled);
4530 }
4531 
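4532 /**
4533  * et131x_isr_handler - The ISR handler
4534  * @work: pointer to the work_struct with which this handler is associated
4535  *
4536  * Scheduled to run in a deferred context by the ISR. This is where the ISR's
4537  * work actually gets done.
4538  */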
4539 static void et131x_isr_handler(struct work_struct *work)
4540 {
4541  struct et131x_adapter *adapter =
4542  container_of(work, struct et131x_adapter, task);
4543  u32 status = adapter->stats.interrupt_status;
4544  struct address_map __iomem *iomem = adapter->regs;
4545 
4546  /*
4547  * These first two are by far the most common. Once handled, we clear
4548  * their two bits in the status word. If the word is now zero, we
4549  * exit.
4550  */
4551  /* Handle all the completed Transmit interrupts */
4552  if (status & ET_INTR_TXDMA_ISR)
4553  et131x_handle_send_interrupt(adapter);
4554 
4555  /* Handle all the completed Receives interrupts */
4556  if (status & ET_INTR_RXDMA_XFR_DONE)
4557  et131x_handle_recv_interrupt(adapter);
4558 
4559  status &= 0xffffffd7;
4560 
4561  if (status) {
4562  /* Handle the TXDMA Error interrupt */
4563  if (status & ET_INTR_TXDMA_ERR) {
4564  u32 txdma_err;
4565 
4566  /* Following read also clears the register (COR) */
4567  txdma_err = readl(&iomem->txdma.tx_dma_error);
4568 
4569  dev_warn(&adapter->pdev->dev,
4570  "TXDMA_ERR interrupt, error = %d\n",
4571  txdma_err);
4572  }
4573 
4574  /* Handle Free Buffer Ring 0 and 1 Low interrupt */
4575  if (status &
4576  (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
4577  /*
4578  * This indicates the number of unused buffers in
4579  * RXDMA free buffer ring 0 is <= the limit you
4580  * programmed. Free buffer resources need to be
4581  * returned. Free buffers are consumed as packets
4582  * are passed from the network to the host. The host
4583  * becomes aware of the packets from the contents of
4584  * the packet status ring. This ring is queried when
4585  * the packet done interrupt occurs. Packets are then
4586  * passed to the OS. When the OS is done with the
4587  * packets the resources can be returned to the
4588  * ET1310 for re-use. This interrupt is one method of
4589  * returning resources.
4590  */
4591 
4592  /* If the user has flow control on, then we will
4593  * send a pause packet, otherwise just exit
4594  */
4595  if (adapter->flowcontrol == FLOW_TXONLY ||
4596  adapter->flowcontrol == FLOW_BOTH) {
4597  u32 pm_csr;
4598 
4599  /* Tell the device to send a pause packet via
4600  * the back pressure register (bp req and
4601  * bp xon/xoff)
4602  */
4603  pm_csr = readl(&iomem->global.pm_csr);
4604  if (!et1310_in_phy_coma(adapter))
4605  writel(3, &iomem->txmac.bp_ctrl);
4606  }
4607  }
4608 
4609  /* Handle Packet Status Ring Low Interrupt */
4610  if (status & ET_INTR_RXDMA_STAT_LOW) {
4611 
4612  /*
4613  * Same idea as with the two Free Buffer Rings.
4614  * Packets going from the network to the host each
4615  * consume a free buffer resource and a packet status
4616  * resource. These resources are passed to the OS.
4617  * When the OS is done with the resources, they need
4618  * to be returned to the ET1310. This is one method
4619  * of returning the resources.
4620  */
4621  }
4622 
4623  /* Handle RXDMA Error Interrupt */
4624  if (status & ET_INTR_RXDMA_ERR) {
4625  /*
4626  * The rxdma_error interrupt is sent when a time-out
4627  * on a request issued by the JAGCore has occurred or
4628  * a completion is returned with an unsuccessful
4629  * status. In both cases the request is considered
4630  * complete. The JAGCore will automatically re-try the
4631  * request in question. Normally information on events
4632  * like these are sent to the host using the "Advanced
4633  * Error Reporting" capability. This interrupt is
4634  * another way of getting similar information. The
4635  * only thing required is to clear the interrupt by
4636  * reading the ISR in the global resources. The
4637  * JAGCore will do a re-try on the request. Normally
4638  * you should never see this interrupt. If you start
4639  * to see this interrupt occurring frequently then
4640  * something bad has occurred. A reset might be the
4641  * thing to do.
4642  */
4643  /* TRAP();*/
4644 
4645  dev_warn(&adapter->pdev->dev,
4646  "RxDMA_ERR interrupt, error %x\n",
4647  readl(&iomem->txmac.tx_test));
4648  }
4649 
4650  /* Handle the Wake on LAN Event */
4651  if (status & ET_INTR_WOL) {
4652  /*
4653  * This is a secondary interrupt for wake on LAN.
4654  * The driver should never see this, if it does,
4655  * something serious is wrong. We will TRAP the
4656  * message when we are in DBG mode, otherwise we
4657  * will ignore it.
4658  */
4659  dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
4660  }
4661 
4662  /* Let's move on to the TxMac */
4663  if (status & ET_INTR_TXMAC) {
4664  u32 err = readl(&iomem->txmac.err);
4665 
4666  /*
4667  * When any of the errors occur and TXMAC generates
4668  * an interrupt to report these errors, it usually
4669  * means that TXMAC has detected an error in the data
4670  * stream retrieved from the on-chip Tx Q. All of
4671  * these errors are catastrophic and TXMAC won't be
4672  * able to recover data when these errors occur. In
4673  * a nutshell, the whole Tx path will have to be reset
4674  * and re-configured afterwards.
4675  */
4676  dev_warn(&adapter->pdev->dev,
4677  "TXMAC interrupt, error 0x%08x\n",
4678  err);
4679 
4680  /* If we are debugging, we want to see this error,
4681  * otherwise we just want the device to be reset and
4682  * continue
4683  */
4684  }
4685 
4686  /* Handle RXMAC Interrupt */
4687  if (status & ET_INTR_RXMAC) {
4688  /*
4689  * These interrupts are catastrophic to the device,
4690  * what we need to do is disable the interrupts and
4691  * set the flag to cause us to reset so we can solve
4692  * this issue.
4693  */
4694  /* MP_SET_FLAG( adapter,
4695  fMP_ADAPTER_HARDWARE_ERROR); */
4696 
4697  dev_warn(&adapter->pdev->dev,
4698  "RXMAC interrupt, error 0x%08x. Requesting reset\n",
4699  readl(&iomem->rxmac.err_reg));
4700 
4701  dev_warn(&adapter->pdev->dev,
4702  "Enable 0x%08x, Diag 0x%08x\n",
4703  readl(&iomem->rxmac.ctrl),
4704  readl(&iomem->rxmac.rxq_diag));
4705 
4706  /*
4707  * If we are debugging, we want to see this error,
4708  * otherwise we just want the device to be reset and
4709  * continue
4710  */
4711  }
4712 
4713  /* Handle MAC_STAT Interrupt */
4714  if (status & ET_INTR_MAC_STAT) {
4715  /*
4716  * This means at least one of the un-masked counters
4717  * in the MAC_STAT block has rolled over. Use this
4718  * to maintain the top, software managed bits of the
4719  * counter(s).
4720  */
4721  et1310_handle_macstat_interrupt(adapter);
4722  }
4723 
4724  /* Handle SLV Timeout Interrupt */
4725  if (status & ET_INTR_SLV_TIMEOUT) {
4726  /*
4727  * This means a timeout has occurred on a read or
4728  * write request to one of the JAGCore registers. The
4729  * Global Resources block has terminated the request
4730  * and on a read request, returned a "fake" value.
4731  * The most likely reasons are: Bad Address or the
4732  * addressed module is in a power-down state and
4733  * can't respond.
4734  */
4735  }
4736  }
4737  et131x_enable_interrupts(adapter);
4738 }
4739 
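4740 /**
4741  * et131x_stats - Return the current device statistics.
4742  * @netdev: device whose stats are being queried
4743  *
4744  * Returns a pointer to the net_device_stats structure for the device
4745  */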
4746 static struct net_device_stats *et131x_stats(struct net_device *netdev)
4747 {
4748  struct et131x_adapter *adapter = netdev_priv(netdev);
4749  struct net_device_stats *stats = &adapter->net_stats;
4750  struct ce_stats *devstat = &adapter->stats;
4751 
4752  stats->rx_errors = devstat->rx_length_errs +
4753  devstat->rx_align_errs +
4754  devstat->rx_crc_errs +
4755  devstat->rx_code_violations +
4756  devstat->rx_other_errs;
4757  stats->tx_errors = devstat->tx_max_pkt_errs;
4758  stats->multicast = devstat->multicast_pkts_rcvd;
4759  stats->collisions = devstat->tx_collisions;
4760 
4761  stats->rx_length_errors = devstat->rx_length_errs;
4762  stats->rx_over_errors = devstat->rx_overflows;
4763  stats->rx_crc_errors = devstat->rx_crc_errs;
4764 
4765  /* NOTE: These stats don't have corresponding values in CE_STATS,
4766  * so we're going to have to update these directly from within the
4767  * TX/RX code
4768  */
4769  /* stats->rx_bytes = 20; devstat->; */
4770  /* stats->tx_bytes = 20; devstat->; */
4771  /* stats->rx_dropped = devstat->; */
4772  /* stats->tx_dropped = devstat->; */
4773 
4774  /* NOTE: Not used, can't find analogous statistics */
4775  /* stats->rx_frame_errors = devstat->; */
4776  /* stats->rx_fifo_errors = devstat->; */
4777  /* stats->rx_missed_errors = devstat->; */
4778 
4779  /* stats->tx_aborted_errors = devstat->; */
4780  /* stats->tx_carrier_errors = devstat->; */
4781  /* stats->tx_fifo_errors = devstat->; */
4782  /* stats->tx_heartbeat_errors = devstat->; */
4783  /* stats->tx_window_errors = devstat->; */
4784  return stats;
4785 }
4786 
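4787 /**
4788  * et131x_open - Open the device for use.
4789  * @netdev: device to be opened
4790  *
4791  * Returns 0 on success, errno on failure (as defined in errno.h)
4792  */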
4793 static int et131x_open(struct net_device *netdev)
4794 {
4795  struct et131x_adapter *adapter = netdev_priv(netdev);
4796  struct pci_dev *pdev = adapter->pdev;
4797  unsigned int irq = pdev->irq;
4798  int result;
4799 
4800  /* Start the timer to track NIC errors */
4801  init_timer(&adapter->error_timer);
4802  adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000;
4803  adapter->error_timer.function = et131x_error_timer_handler;
4804  adapter->error_timer.data = (unsigned long)adapter;
4805  add_timer(&adapter->error_timer);
4806 
4807  result = request_irq(irq, et131x_isr,
4808  IRQF_SHARED, netdev->name, netdev);
4809  if (result) {
4810  dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
4811  return result;
4812  }
4813 
4814  adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE;
4815 
4816  et131x_up(netdev);
4817 
4818  return result;
4819 }
4820 
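4821 /**
4822  * et131x_close - Close the device
4823  * @netdev: device to be closed
4824  *
4825  * Returns 0 on success, errno on failure (as defined in errno.h)
4826  */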
4827 static int et131x_close(struct net_device *netdev)
4828 {
4829  struct et131x_adapter *adapter = netdev_priv(netdev);
4830 
4831  et131x_down(netdev);
4832 
4833  adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
4834  free_irq(adapter->pdev->irq, netdev);
4835 
4836  /* Stop the error timer */
4837  return del_timer_sync(&adapter->error_timer);
4838 }
4839 
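4840 /**
4841  * et131x_ioctl - The I/O Control handler for the driver
4842  * @netdev: device on which the control request is being made
4843  * @reqbuf: a pointer to the IOCTL request buffer
4844  * @cmd: the IOCTL command code
4845  *
4846  * Returns 0 on success, errno on failure (as defined in errno.h)
4847  */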
4848 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
4849  int cmd)
4850 {
4851  struct et131x_adapter *adapter = netdev_priv(netdev);
4852 
4853  if (!adapter->phydev)
4854  return -EINVAL;
4855 
4856  return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
4857 }
4858 
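4859 /**
4860  * et131x_set_packet_filter - Configures the Rx Packet filtering on the device
4861  * @adapter: pointer to our private adapter structure
4862  *
4863  * Programs the rxmac packet filter registers from adapter->packet_filter.
4864  *
4865  * Returns 0 on success, errno on failure
4866  */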
4867 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
4868 {
4869  int filter = adapter->packet_filter;
4870  int status = 0;
4871  u32 ctrl;
4872  u32 pf_ctrl;
4873 
4874  ctrl = readl(&adapter->regs->rxmac.ctrl);
4875  pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
4876 
4877  /* Default to disabled packet filtering. Enable it in the individual
4878  * case statements that require the device to filter something
4879  */
4880  ctrl |= 0x04;
4881 
4882  /* Set us to be in promiscuous mode so we receive everything, this
4883  * is also true when we get a packet filter of 0
4884  */
4885  if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
4886  pf_ctrl &= ~7; /* Clear filter bits */
4887  else {
4888  /*
4889  * Set us up with Multicast packet filtering. Three cases are
4890  * possible - (1) we have a multi-cast list, (2) we receive ALL
4891  * multicast entries or (3) we receive none.
4892  */
4893  if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
4894  pf_ctrl &= ~2; /* Multicast filter bit */
4895  else {
4896  et1310_setup_device_for_multicast(adapter);
4897  pf_ctrl |= 2;
4898  ctrl &= ~0x04;
4899  }
4900 
4901  /* Set us up with Unicast packet filtering */
4902  if (filter & ET131X_PACKET_TYPE_DIRECTED) {
4903  et1310_setup_device_for_unicast(adapter);
4904  pf_ctrl |= 4;
4905  ctrl &= ~0x04;
4906  }
4907 
4908  /* Set us up with Broadcast packet filtering */
4909  if (filter & ET131X_PACKET_TYPE_BROADCAST) {
4910  pf_ctrl |= 1; /* Broadcast filter bit */
4911  ctrl &= ~0x04;
4912  } else
4913  pf_ctrl &= ~1;
4914 
4915  /* Setup the receive mac configuration registers - Packet
4916  * Filter control + the enable / disable for packet filter
4917  * in the control reg.
4918  */
4919  writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
4920  writel(ctrl, &adapter->regs->rxmac.ctrl);
4921  }
4922  return status;
4923 }
4924 
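4925 /**
4926  * et131x_multicast - The handler to configure multicasting on the interface
4927  * @netdev: a pointer to a net_device struct representing the device
4928  */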
4929 static void et131x_multicast(struct net_device *netdev)
4930 {
4931  struct et131x_adapter *adapter = netdev_priv(netdev);
4932  int packet_filter;
4933  unsigned long flags;
4934  struct netdev_hw_addr *ha;
4935  int i;
4936 
4937  spin_lock_irqsave(&adapter->lock, flags);
4938 
4939  /* Before we modify the platform-independent filter flags, store them
4940  * locally. This allows us to determine if anything's changed and if
4941  * we even need to bother the hardware
4942  */
4943  packet_filter = adapter->packet_filter;
4944 
4945  /* Clear the 'multicast' flag locally, because we only have a single
4946  * flag to check multicast, and multiple multicast addresses can be
4947  * set, this is the easiest way to determine if more than one
4948  * multicast address is being set.
4949  */
4950  packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4951 
4952  /* Check the net_device flags and set the device independent flags
4953  * accordingly
4954  */
4955 
4956  if (netdev->flags & IFF_PROMISC)
4957  adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
4958  else
4959  adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
4960 
4961  if (netdev->flags & IFF_ALLMULTI)
4962  adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4963 
4964  if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST)
4965  adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
4966 
4967  if (netdev_mc_count(netdev) < 1) {
4968  adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
4969  adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
4970  } else
4971  adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
4972 
4973  /* Set values in the private adapter struct */
4974  i = 0;
4975  netdev_for_each_mc_addr(ha, netdev) {
4976  if (i == NIC_MAX_MCAST_LIST)
4977  break;
4978  memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN);
4979  }
4980  adapter->multicast_addr_count = i;
4981 
4982  /* Are the new flags different from the previous ones? If not, then no
4983  * action is required
4984  *
4985  * NOTE - This block will always update the multicast_list with the
4986  * hardware, even if the addresses aren't the same.
4987  */
4988  if (packet_filter != adapter->packet_filter) {
4989  /* Call the device's filter function */
4990  et131x_set_packet_filter(adapter);
4991  }
4992  spin_unlock_irqrestore(&adapter->lock, flags);
4993 }
4994 
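4995 /**
4996  * et131x_tx - The handler to tx a packet on the device
4997  * @skb: data to be Tx'd
4998  * @netdev: device on which data is to be Tx'd
4999  *
5000  * Returns 0 in almost all cases; non-zero value in extreme hard failure only
5001  */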
5002 static int et131x_tx(struct sk_buff *skb, struct net_device *netdev)
5003 {
5004  int status = 0;
5005  struct et131x_adapter *adapter = netdev_priv(netdev);
5006 
5007  /* stop the queue if it's getting full */
5008  if (adapter->tx_ring.used >= NUM_TCB - 1 &&
5009  !netif_queue_stopped(netdev))
5010  netif_stop_queue(netdev);
5011 
5012  /* Save the timestamp for the TX timeout watchdog */
5013  netdev->trans_start = jiffies;
5014 
5015  /* Call the device-specific data Tx routine */
5016  status = et131x_send_packets(skb, netdev);
5017 
5018  /* Check status and manage the netif queue if necessary */
5019  if (status != 0) {
5020  if (status == -ENOMEM)
5021  status = NETDEV_TX_BUSY;
5022  else
5023  status = NETDEV_TX_OK;
5024  }
5025  return status;
5026 }
5027 
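5028 /**
5029  * et131x_tx_timeout - Timeout handler
5030  * @netdev: a pointer to a net_device struct representing the device
5031  *
5032  * The handler called when a Tx request times out. The timeout period is
5033  * specified by the 'watchdog_timeo' element in the net_device structure
5034  * (set to ET131X_TX_TIMEOUT in et131x_pci_setup()).
5035  */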
5036 static void et131x_tx_timeout(struct net_device *netdev)
5037 {
5038  struct et131x_adapter *adapter = netdev_priv(netdev);
5039  struct tcb *tcb;
5040  unsigned long flags;
5041 
5042  /* If the device is closed, ignore the timeout */
5043  if (!(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE))
5044  return;
5045 
5046  /* Any nonrecoverable hardware error?
5047  * Checks adapter->flags for any failure in phy reading
5048  */
5049  if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR)
5050  return;
5051 
5052  /* Hardware failure? */
5053  if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) {
5054  dev_err(&adapter->pdev->dev, "hardware error - reset\n");
5055  return;
5056  }
5057 
5058  /* Is send stuck? */
5059  spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
5060 
5061  tcb = adapter->tx_ring.send_head;
5062 
5063  if (tcb != NULL) {
5064  tcb->count++;
5065 
5066  if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
5067  spin_unlock_irqrestore(&adapter->tcb_send_qlock,
5068  flags);
5069 
5070  dev_warn(&adapter->pdev->dev,
5071  "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n",
5072  tcb->index,
5073  tcb->flags);
5074 
5075  adapter->net_stats.tx_errors++;
5076 
5077  /* perform reset of tx/rx */
5078  et131x_disable_txrx(netdev);
5079  et131x_enable_txrx(netdev);
5080  return;
5081  }
5082  }
5083 
5084  spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
5085 }
5086 
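5087 /**
5088  * et131x_change_mtu - The handler called to change the MTU for the device
5089  * @netdev: device whose MTU is to be changed
5090  * @new_mtu: the desired MTU
5091  *
5092  * Returns 0 on success, errno on failure (as defined in errno.h)
5093  */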
5094 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
5095 {
5096  int result = 0;
5097  struct et131x_adapter *adapter = netdev_priv(netdev);
5098 
5099  /* Make sure the requested MTU is valid */
5100  if (new_mtu < 64 || new_mtu > 9216)
5101  return -EINVAL;
5102 
5103  et131x_disable_txrx(netdev);
5104  et131x_handle_send_interrupt(adapter);
5105  et131x_handle_recv_interrupt(adapter);
5106 
5107  /* Set the new MTU */
5108  netdev->mtu = new_mtu;
5109 
5110  /* Free Rx DMA memory */
5111  et131x_adapter_memory_free(adapter);
5112 
5113  /* Set the config parameter for Jumbo Packet support */
5114  adapter->registry_jumbo_packet = new_mtu + 14;
5115  et131x_soft_reset(adapter);
5116 
5117  /* Alloc and init Rx DMA memory */
5118  result = et131x_adapter_memory_alloc(adapter);
5119  if (result != 0) {
5120  dev_warn(&adapter->pdev->dev,
5121  "Change MTU failed; couldn't re-alloc DMA memory\n");
5122  return result;
5123  }
5124 
5125  et131x_init_send(adapter);
5126 
5127  et131x_hwaddr_init(adapter);
5128  memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5129 
5130  /* Init the device with the new settings */
5131  et131x_adapter_setup(adapter);
5132 
5133  et131x_enable_txrx(netdev);
5134 
5135  return result;
5136 }
5137 
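5138 /**
5139  * et131x_set_mac_addr - handler to change the MAC address for the device
5140  * @netdev: device whose MAC is to be changed
5141  * @new_mac: the desired MAC address
5142  *
5143  * Returns 0 on success, errno on failure (as defined in errno.h)
5144  *
5145  * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14
5146  */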
5147 static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac)
5148 {
5149  int result = 0;
5150  struct et131x_adapter *adapter = netdev_priv(netdev);
5151  struct sockaddr *address = new_mac;
5152 
5153  /* begin blux */
5154 
5155  if (adapter == NULL)
5156  return -ENODEV;
5157 
5158  /* Make sure the requested MAC is valid */
5159  if (!is_valid_ether_addr(address->sa_data))
5160  return -EADDRNOTAVAIL;
5161 
5162  et131x_disable_txrx(netdev);
5163  et131x_handle_send_interrupt(adapter);
5164  et131x_handle_recv_interrupt(adapter);
5165 
5166  /* Set the new MAC */
5167  /* netdev->set_mac_address = &new_mac; */
5168 
5169  memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len);
5170 
5171  netdev_info(netdev, "Setting MAC address to %pM\n",
5172  netdev->dev_addr);
5173 
5174  /* Free Rx DMA memory */
5175  et131x_adapter_memory_free(adapter);
5176 
5177  et131x_soft_reset(adapter);
5178 
5179  /* Alloc and init Rx DMA memory */
5180  result = et131x_adapter_memory_alloc(adapter);
5181  if (result != 0) {
5182  dev_err(&adapter->pdev->dev,
5183  "Change MAC failed; couldn't re-alloc DMA memory\n");
5184  return result;
5185  }
5186 
5187  et131x_init_send(adapter);
5188 
5189  et131x_hwaddr_init(adapter);
5190 
5191  /* Init the device with the new settings */
5192  et131x_adapter_setup(adapter);
5193 
5194  et131x_enable_txrx(netdev);
5195 
5196  return result;
5197 }
5198 
5199 static const struct net_device_ops et131x_netdev_ops = {
5200  .ndo_open = et131x_open,
5201  .ndo_stop = et131x_close,
5202  .ndo_start_xmit = et131x_tx,
5203  .ndo_set_rx_mode = et131x_multicast,
5204  .ndo_tx_timeout = et131x_tx_timeout,
5205  .ndo_change_mtu = et131x_change_mtu,
5206  .ndo_set_mac_address = et131x_set_mac_addr,
5207  .ndo_validate_addr = eth_validate_addr,
5208  .ndo_get_stats = et131x_stats,
5209  .ndo_do_ioctl = et131x_ioctl,
5210 };
5211 
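5212 /**
5213  * et131x_pci_setup - Perform device initialization
5214  * @pdev: a pointer to the device's pci_dev structure
5215  * @ent: this device's entry in the pci_device_id table
5216  *
5217  * Returns 0 on success, errno on failure (as defined in errno.h)
5218  *
5219  * Registered in the pci_driver structure, this function is called when the
5220  * PCI subsystem detects a new PCI device which matches the information
5221  * contained in the pci_device_id table. This routine is the equivalent to
5222  * a device insertion routine.
5223  */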
5224 static int __devinit et131x_pci_setup(struct pci_dev *pdev,
5225  const struct pci_device_id *ent)
5226 {
5227  struct net_device *netdev;
5228  struct et131x_adapter *adapter;
5229  int rc;
5230  int ii;
5231 
5232  rc = pci_enable_device(pdev);
5233  if (rc < 0) {
5234  dev_err(&pdev->dev, "pci_enable_device() failed\n");
5235  goto out;
5236  }
5237 
5238  /* Perform some basic PCI checks */
5239  if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5240  dev_err(&pdev->dev, "Can't find PCI device's base address\n");
5241  rc = -ENODEV;
5242  goto err_disable;
5243  }
5244 
5245  rc = pci_request_regions(pdev, DRIVER_NAME);
5246  if (rc < 0) {
5247  dev_err(&pdev->dev, "Can't get PCI resources\n");
5248  goto err_disable;
5249  }
5250 
5251  pci_set_master(pdev);
5252 
5253  /* Check the DMA addressing support of this device */
5254  if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
5255  rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
5256  if (rc < 0) {
5257  dev_err(&pdev->dev,
5258  "Unable to obtain 64 bit DMA for consistent allocations\n");
5259  goto err_release_res;
5260  }
5261  } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
5262  rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
5263  if (rc < 0) {
5264  dev_err(&pdev->dev,
5265  "Unable to obtain 32 bit DMA for consistent allocations\n");
5266  goto err_release_res;
5267  }
5268  } else {
5269  dev_err(&pdev->dev, "No usable DMA addressing method\n");
5270  rc = -EIO;
5271  goto err_release_res;
5272  }
5273 
5274  /* Allocate netdev and private adapter structs */
5275  netdev = alloc_etherdev(sizeof(struct et131x_adapter));
5276  if (!netdev) {
5277  dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
5278  rc = -ENOMEM;
5279  goto err_release_res;
5280  }
5281 
5282  netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
5283  netdev->netdev_ops = &et131x_netdev_ops;
5284 
5285  SET_NETDEV_DEV(netdev, &pdev->dev);
5286  SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
5287 
5288  adapter = et131x_adapter_init(netdev, pdev);
5289 
5290  rc = et131x_pci_init(adapter, pdev);
5291  if (rc < 0)
5292  goto err_free_dev;
5293 
5294  /* Map the bus-relative registers to system virtual memory */
5295  adapter->regs = pci_ioremap_bar(pdev, 0);
5296  if (!adapter->regs) {
5297  dev_err(&pdev->dev, "Cannot map device registers\n");
5298  rc = -ENOMEM;
5299  goto err_free_dev;
5300  }
5301 
5302  /* If Phy COMA mode was enabled when we went down, disable it here. */
5303  writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
5304 
5305  /* Issue a global reset to the et1310 */
5306  et131x_soft_reset(adapter);
5307 
5308  /* Disable all interrupts (paranoid) */
5309  et131x_disable_interrupts(adapter);
5310 
5311  /* Allocate DMA memory */
5312  rc = et131x_adapter_memory_alloc(adapter);
5313  if (rc < 0) {
5314  dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
5315  goto err_iounmap;
5316  }
5317 
5318  /* Init send data structures */
5319  et131x_init_send(adapter);
5320 
5321  /* Set up the task structure for the ISR's deferred handler */
5322  INIT_WORK(&adapter->task, et131x_isr_handler);
5323 
5324  /* Copy address into the net_device struct */
5325  memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN);
5326 
5327  /* Init variable for counting how long we do not have link status */
5328  adapter->boot_coma = 0;
5329  et1310_disable_phy_coma(adapter);
5330 
5331  rc = -ENOMEM;
5332 
5333  /* Setup the mii_bus struct */
5334  adapter->mii_bus = mdiobus_alloc();
5335  if (!adapter->mii_bus) {
5336  dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
5337  goto err_mem_free;
5338  }
5339 
5340  adapter->mii_bus->name = "et131x_eth_mii";
5341  snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
5342  (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
5343  adapter->mii_bus->priv = netdev;
5344  adapter->mii_bus->read = et131x_mdio_read;
5345  adapter->mii_bus->write = et131x_mdio_write;
5346  adapter->mii_bus->reset = et131x_mdio_reset;
5347  adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
5348  if (!adapter->mii_bus->irq) {
5349  dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
5350  goto err_mdio_free;
5351  }
5352 
5353  for (ii = 0; ii < PHY_MAX_ADDR; ii++)
5354  adapter->mii_bus->irq[ii] = PHY_POLL;
5355 
5356  rc = mdiobus_register(adapter->mii_bus);
5357  if (rc < 0) {
5358  dev_err(&pdev->dev, "failed to register MII bus\n");
5359  goto err_mdio_free_irq;
5360  }
5361 
5362  rc = et131x_mii_probe(netdev);
5363  if (rc < 0) {
5364  dev_err(&pdev->dev, "failed to probe MII bus\n");
5365  goto err_mdio_unregister;
5366  }
5367 
5368  /* Setup et1310 as per the documentation */
5369  et131x_adapter_setup(adapter);
5370 
5371  /* We can enable interrupts now
5372  *
5373  * NOTE - Because registration of interrupt handler is done in the
5374  * device's open(), defer enabling device interrupts to that
5375  * point
5376  */
5377 
5378  /* Register the net_device struct with the Linux network layer */
5379  rc = register_netdev(netdev);
5380  if (rc < 0) {
5381  dev_err(&pdev->dev, "register_netdev() failed\n");
5382  goto err_phy_disconnect;
5383  }
5384 
5385  /* Register the net_device struct with the PCI subsystem. Save a copy
5386  * of the PCI config space for this device now that the device has
5387  * been initialized, just in case it needs to be quickly restored.
5388  */
5389  pci_set_drvdata(pdev, netdev);
5390 out:
5391  return rc;
5392 
5393 err_phy_disconnect:
5394  phy_disconnect(adapter->phydev);
5395 err_mdio_unregister:
5396  mdiobus_unregister(adapter->mii_bus);
5397 err_mdio_free_irq:
5398  kfree(adapter->mii_bus->irq);
5399 err_mdio_free:
5400  mdiobus_free(adapter->mii_bus);
5401 err_mem_free:
5402  et131x_adapter_memory_free(adapter);
5403 err_iounmap:
5404  iounmap(adapter->regs);
5405 err_free_dev:
5406  pci_dev_put(pdev);
5407  free_netdev(netdev);
5408 err_release_res:
5409  pci_release_regions(pdev);
5410 err_disable:
5411  pci_disable_device(pdev);
5412  goto out;
5413 }
5414 
5415 static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
5416  { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
5417  { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
5418  {0,}
5419 };
5420 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
5421 
5422 static struct pci_driver et131x_driver = {
5423  .name = DRIVER_NAME,
5424  .id_table = et131x_pci_table,
5425  .probe = et131x_pci_setup,
5426  .remove = __devexit_p(et131x_pci_remove),
5427  .driver.pm = ET131X_PM_OPS,
5428 };
5429 
5430 module_pci_driver(et131x_driver);