1 /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
2 /*
3  Written/copyright 1999-2001 by Donald Becker.
4  Portions copyright (c) 2001,2002 Sun Microsystems ([email protected])
5  Portions copyright 2001,2002 Manfred Spraul ([email protected])
6  Portions copyright 2004 Harald Welte <[email protected]>
7 
8  This software may be used and distributed according to the terms of
9  the GNU General Public License (GPL), incorporated herein by reference.
10  Drivers based on or derived from this code fall under the GPL and must
11  retain the authorship, copyright and license notice. This file is not
12  a complete program and may only be used when the entire operating
13  system is licensed under the GPL. A license under other terms may be
14  available. Contact the original author for details.
15 
16  The original author may be reached as [email protected], or at
17  Scyld Computing Corporation
18  410 Severn Ave., Suite 210
19  Annapolis MD 21403
20 
21  Support information and updates available at
22  http://www.scyld.com/network/netsemi.html
23  [link no longer provides useful info -jgarzik]
24 
25 
26  TODO:
27  * big endian support with CFG:BEM instead of cpu_to_le32
28 */
29 
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/string.h>
33 #include <linux/timer.h>
34 #include <linux/errno.h>
35 #include <linux/ioport.h>
36 #include <linux/slab.h>
37 #include <linux/interrupt.h>
38 #include <linux/pci.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/skbuff.h>
42 #include <linux/init.h>
43 #include <linux/spinlock.h>
44 #include <linux/ethtool.h>
45 #include <linux/delay.h>
46 #include <linux/rtnetlink.h>
47 #include <linux/mii.h>
48 #include <linux/crc32.h>
49 #include <linux/bitops.h>
50 #include <linux/prefetch.h>
51 #include <asm/processor.h> /* Processor type for cache alignment. */
52 #include <asm/io.h>
53 #include <asm/irq.h>
54 #include <asm/uaccess.h>
55 
56 #define DRV_NAME "natsemi"
57 #define DRV_VERSION "2.1"
58 #define DRV_RELDATE "Sept 11, 2006"
59 
60 #define RX_OFFSET 2
61 
62 /* Updated to recommendations in pci-skeleton v2.03. */
63 
64 /* The user-configurable values.
65  These may be modified when a driver module is loaded.*/
66 
67 #define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
68  NETIF_MSG_LINK | \
69  NETIF_MSG_WOL | \
70  NETIF_MSG_RX_ERR | \
71  NETIF_MSG_TX_ERR)
72 static int debug = -1;
73 
74 static int mtu;
75 
76 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
77  This chip uses a 512 element hash table based on the Ethernet CRC. */
78 static const int multicast_filter_limit = 100;
79 
80 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
81  Setting to > 1518 effectively disables this feature. */
82 static int rx_copybreak;
83 
84 static int dspcfg_workaround = 1;
85 
86 /* Used to pass the media type, etc.
87  Both 'options[]' and 'full_duplex[]' should exist for driver
88  interoperability.
89  The media type is usually passed in 'options[]'.
90 */
91 #define MAX_UNITS 8 /* More are supported, limit only on options */
92 static int options[MAX_UNITS];
93 static int full_duplex[MAX_UNITS];
94 
95 /* Operational parameters that are set at compile time. */
96 
97 /* Keep the ring sizes a power of two for compile efficiency.
98  The compiler will convert <unsigned>'%'<2^N> into a bit mask.
99  Making the Tx ring too large decreases the effectiveness of channel
100  bonding and packet priority.
101  There are no ill effects from too-large receive rings. */
102 #define TX_RING_SIZE 16
103 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
104 #define RX_RING_SIZE 32
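/* Editorial note (not in the original source): because both ring sizes
 * are powers of two, an index expression such as "cur_tx % TX_RING_SIZE"
 * compiles down to "cur_tx & (TX_RING_SIZE - 1)", a single AND; this is
 * the mask conversion the comment above refers to. */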
105 
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (2*HZ)
109 
110 #define NATSEMI_HW_TIMEOUT 400
111 #define NATSEMI_TIMER_FREQ (5*HZ)
112 #define NATSEMI_PG0_NREGS 64
113 #define NATSEMI_RFDR_NREGS 8
114 #define NATSEMI_PG1_NREGS 4
115 #define NATSEMI_NREGS (NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
116  NATSEMI_PG1_NREGS)
117 #define NATSEMI_REGS_VER 1 /* v1 added RFDR registers */
118 #define NATSEMI_REGS_SIZE (NATSEMI_NREGS * sizeof(u32))
119 
120 /* Buffer sizes:
121  * The nic writes 32-bit values, even if the upper bytes of
122  * a 32-bit value are beyond the end of the buffer.
123  */
124 #define NATSEMI_HEADERS 22 /* 2*mac,type,vlan,crc */
125 #define NATSEMI_PADDING 16 /* 2 bytes should be sufficient */
126 #define NATSEMI_LONGPKT 1518 /* limit for normal packets */
127 #define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
128 
129 /* These identify the driver base version and may not be removed. */
130 static const char version[] __devinitconst =
131  KERN_INFO DRV_NAME " dp8381x driver, version "
132  DRV_VERSION ", " DRV_RELDATE "\n"
133  " originally by Donald Becker <[email protected]>\n"
134  " 2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";
135 
136 MODULE_AUTHOR("Donald Becker <[email protected]>");
137 MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
138 MODULE_LICENSE("GPL");
139 
140 module_param(mtu, int, 0);
141 module_param(debug, int, 0);
142 module_param(rx_copybreak, int, 0);
143 module_param(dspcfg_workaround, int, 0);
144 module_param_array(options, int, NULL, 0);
145 module_param_array(full_duplex, int, NULL, 0);
146 MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
147 MODULE_PARM_DESC(debug, "DP8381x default debug level");
148 MODULE_PARM_DESC(rx_copybreak,
149  "DP8381x copy breakpoint for copy-only-tiny-frames");
150 MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
151 MODULE_PARM_DESC(options,
152  "DP8381x: Bits 0-3: media type, bit 17: full duplex");
153 MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
154 
155 /*
156  Theory of Operation
157 
158 I. Board Compatibility
159 
160 This driver is designed for the National Semiconductor DP83815 PCI Ethernet NIC.
161 It also works with other chips in the DP83810 series.
162 
163 II. Board-specific settings
164 
165 This driver requires the PCI interrupt line to be valid.
166 It honors the EEPROM-set values.
167 
168 III. Driver operation
169 
170 IIIa. Ring buffers
171 
172 This driver uses two statically allocated fixed-size descriptor lists
173 formed into rings by a branch from the final descriptor to the beginning of
174 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
175 The NatSemi design uses a 'next descriptor' pointer that the driver forms
176 into a list.
177 
178 IIIb/c. Transmit/Receive Structure
179 
180 This driver uses a zero-copy receive and transmit scheme.
181 The driver allocates full frame size skbuffs for the Rx ring buffers at
182 open() time and passes the skb->data field to the chip as receive data
183 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
184 a fresh skbuff is allocated and the frame is copied to the new skbuff.
185 When the incoming frame is larger, the skbuff is passed directly up the
186 protocol stack. Buffers consumed this way are replaced by newly allocated
187 skbuffs in a later phase of receives.
188 
189 The RX_COPYBREAK value is chosen to trade off the memory wasted by
190 using a full-sized skbuff for small frames vs. the copying costs of larger
191 frames. New boards are typically used in generously configured machines
192 and the underfilled buffers have negligible impact compared to the benefit of
193 a single allocation size, so the default value of zero results in never
194 copying packets. When copying is done, the cost is usually mitigated by using
195 a combined copy/checksum routine. Copying also preloads the cache, which is
196 most useful with small frames.
197 
198 A subtle aspect of the operation is that unaligned buffers are not permitted
199 by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
200 longword aligned for further processing. When copying, frames are put into
201 the skbuff at an offset of "+2", 16-byte aligning the IP header.
202 
203 IIId. Synchronization
204 
205 Most operations are synchronized on the np->lock irq spinlock, except the
206 receive and transmit paths which are synchronised using a combination of
207 hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
208 
209 IVb. References
210 
211 http://www.scyld.com/expert/100mbps.html
212 http://www.scyld.com/expert/NWay.html
213 Datasheet is available from:
214 http://www.national.com/pf/DP/DP83815.html
215 
216 IVc. Errata
217 
218 None characterised.
219 */
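/*
 * Editorial sketch (not part of the original driver): the copy-only-
 * tiny-frames scheme from section IIIb/c above, in outline. The names
 * here are illustrative; the real receive path is netdev_rx() below.
 */
#if 0	/* illustration only */
static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
					   struct sk_buff *ring_skb,
					   unsigned int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak &&
	    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
		/* small frame: copy into a fresh skbuff and reuse the
		 * ring buffer; reserving RX_OFFSET (2) bytes 16-byte
		 * aligns the IP header */
		skb_reserve(skb, RX_OFFSET);
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		return skb;
	}
	/* large frame: pass the ring skbuff itself up the stack; it is
	 * replaced by a fresh allocation in a later refill_rx() pass */
	return ring_skb;
}
#endif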
220 
221 
222 
223 /*
224  * Support for fibre connections on Am79C874:
225  * This phy needs a special setup when connected to a fibre cable.
226  * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
227  */
228 #define PHYID_AM79C874 0x0022561b
229 
230 enum {
231  MII_MCTRL = 0x15, /* mode control register */
232  MII_FX_SEL = 0x0001, /* 100BASE-FX (fiber) */
233  MII_EN_SCRM = 0x0004, /* enable scrambler (tp) */
234 };
235 
236 enum {
237  NATSEMI_FLAG_IGNORE_PHY = 1,
238 };
239 
240 /* array of board data directly indexed by pci_tbl[x].driver_data */
241 static struct {
242  const char *name;
243  unsigned long flags;
244  unsigned int eeprom_size;
245 } natsemi_pci_info[] __devinitdata = {
246  { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
247  { "NatSemi DP8381[56]", 0, 24 },
248 };
249 
250 static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
251  { PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
252  { PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
253  { } /* terminate list */
254 };
255 MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
256 
257 /* Offsets to the device registers.
258  Unlike software-only systems, device drivers interact with complex hardware.
259  It's not useful to define symbolic names for every register bit in the
260  device.
261 */
262 enum register_offsets {
263  ChipCmd = 0x00,
264  ChipConfig = 0x04,
265  EECtrl = 0x08,
266  PCIBusCfg = 0x0C,
267  IntrStatus = 0x10,
268  IntrMask = 0x14,
269  IntrEnable = 0x18,
270  IntrHoldoff = 0x1C, /* DP83816 only */
271  TxRingPtr = 0x20,
272  TxConfig = 0x24,
273  RxRingPtr = 0x30,
274  RxConfig = 0x34,
275  ClkRun = 0x3C,
276  WOLCmd = 0x40,
277  PauseCmd = 0x44,
278  RxFilterAddr = 0x48,
279  RxFilterData = 0x4C,
280  BootRomAddr = 0x50,
281  BootRomData = 0x54,
282  SiliconRev = 0x58,
283  StatsCtrl = 0x5C,
284  StatsData = 0x60,
285  RxPktErrs = 0x60,
286  RxMissed = 0x68,
287  RxCRCErrs = 0x64,
288  BasicControl = 0x80,
289  BasicStatus = 0x84,
290  AnegAdv = 0x90,
291  AnegPeer = 0x94,
292  PhyStatus = 0xC0,
293  MIntrCtrl = 0xC4,
294  MIntrStatus = 0xC8,
295  PhyCtrl = 0xE4,
296 
297  /* These are from the spec, around page 78... on a separate table.
298  * The meaning of these registers depends on the value of PGSEL. */
299  PGSEL = 0xCC,
300  PMDCSR = 0xE4,
301  TSTDAT = 0xFC,
302  DSPCFG = 0xF4,
303  SDCFG = 0xF8
304 };
305 /* the values for the 'magic' registers above (PGSEL=1) */
306 #define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
307 #define TSTDAT_VAL 0x0
308 #define DSPCFG_VAL 0x5040
309 #define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
310 #define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
311 #define DSPCFG_COEF 0x1000 /* see coefficient (in TSTDAT) bit in DSPCFG */
312 #define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
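/*
 * Editorial note (not in the original source): the four "magic"
 * registers above are only visible on register page 1, so every access
 * in this driver brackets them with PGSEL writes, e.g. (see
 * init_phy_fixup() and do_cable_magic() below):
 */
#if 0	/* illustration only */
	writew(1, ioaddr + PGSEL);		/* select page 1 */
	writew(PMDCSR_VAL, ioaddr + PMDCSR);	/* program paged registers */
	writew(0, ioaddr + PGSEL);		/* back to page 0 */
#endif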
313 
314 /* misc PCI space registers */
315 enum pci_register_offsets {
316  PCIPM = 0x44,
317 };
318 
319 enum ChipCmd_bits {
320  ChipReset = 0x100,
321  RxReset = 0x20,
322  TxReset = 0x10,
323  RxOff = 0x08,
324  RxOn = 0x04,
325  TxOff = 0x02,
326  TxOn = 0x01,
327 };
328 
329 enum ChipConfig_bits {
330  CfgPhyDis = 0x200,
331  CfgPhyRst = 0x400,
332  CfgExtPhy = 0x1000,
333  CfgAnegEnable = 0x2000,
334  CfgAneg100 = 0x4000,
335  CfgAnegFull = 0x8000,
336  CfgAnegDone = 0x8000000,
337  CfgFullDuplex = 0x20000000,
338  CfgSpeed100 = 0x40000000,
339  CfgLink = 0x80000000,
340 };
341 
342 enum EECtrl_bits {
343  EE_ShiftClk = 0x04,
344  EE_DataIn = 0x01,
345  EE_ChipSelect = 0x08,
346  EE_DataOut = 0x02,
347  MII_Data = 0x10,
348  MII_Write = 0x20,
349  MII_ShiftClk = 0x40,
350 };
351 
352 enum PCIBusCfg_bits {
353  EepromReload = 0x4,
354 };
355 
356 /* Bits in the interrupt status/mask registers. */
357 enum IntrStatus_bits {
358  IntrRxDone = 0x0001,
359  IntrRxIntr = 0x0002,
360  IntrRxErr = 0x0004,
361  IntrRxEarly = 0x0008,
362  IntrRxIdle = 0x0010,
363  IntrRxOverrun = 0x0020,
364  IntrTxDone = 0x0040,
365  IntrTxIntr = 0x0080,
366  IntrTxErr = 0x0100,
367  IntrTxIdle = 0x0200,
368  IntrTxUnderrun = 0x0400,
369  StatsMax = 0x0800,
370  SWInt = 0x1000,
371  WOLPkt = 0x2000,
372  LinkChange = 0x4000,
373  IntrHighBits = 0x8000,
374  RxStatusFIFOOver = 0x10000,
375  IntrPCIErr = 0xf00000,
376  RxResetDone = 0x1000000,
377  TxResetDone = 0x2000000,
379 };
380 
381 /*
382  * Default Interrupts:
383  * Rx OK, Rx Packet Error, Rx Overrun,
384  * Tx OK, Tx Packet Error, Tx Underrun,
385  * MIB Service, Phy Interrupt, High Bits,
386  * Rx Status FIFO overrun,
387  * Received Target Abort, Received Master Abort,
388  * Signalled System Error, Received Parity Error
389  */
390 #define DEFAULT_INTR 0x00f1cd65
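/* Editorial note (not in the original source): 0x00f1cd65 is the OR of
 * the IntrStatus_bits named in the comment above: IntrRxDone |
 * IntrRxErr | IntrRxOverrun | IntrTxDone | IntrTxErr | IntrTxUnderrun |
 * StatsMax | LinkChange | IntrHighBits | RxStatusFIFOOver | IntrPCIErr. */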
391 
392 enum TxConfig_bits {
393  TxDrthMask = 0x3f,
394  TxFlthMask = 0x3f00,
395  TxMxdmaMask = 0x700000,
396  TxMxdma_512 = 0x0,
397  TxMxdma_4 = 0x100000,
398  TxMxdma_8 = 0x200000,
399  TxMxdma_16 = 0x300000,
400  TxMxdma_32 = 0x400000,
401  TxMxdma_64 = 0x500000,
402  TxMxdma_128 = 0x600000,
403  TxMxdma_256 = 0x700000,
404  TxCollRetry = 0x800000,
405  TxAutoPad = 0x10000000,
406  TxMacLoop = 0x20000000,
407  TxHeartIgn = 0x40000000,
408  TxCarrierIgn = 0x80000000
409 };
410 
411 /*
412  * Tx Configuration:
413  * - 256 byte DMA burst length
414  * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
415  * - 64 bytes initial drain threshold (i.e. begin actual transmission
416  * when 64 byte are in the fifo)
417  * - on tx underruns, increase drain threshold by 64.
418  * - at most use a drain threshold of 1472 bytes: The sum of the fill
419  * threshold and the drain threshold must be less than 2016 bytes.
420  *
421  */
422 #define TX_FLTH_VAL ((512/32) << 8)
423 #define TX_DRTH_VAL_START (64/32)
424 #define TX_DRTH_VAL_INC 2
425 #define TX_DRTH_VAL_LIMIT (1472/32)
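/* Editorial note (not in the original source): FLTH and DRTH are
 * programmed in 32-byte units, so TX_FLTH_VAL = (512/32) << 8 = 0x1000
 * sits in TxFlthMask (bits 8-13), while TX_DRTH_VAL_START = 64/32 = 2
 * and TX_DRTH_VAL_LIMIT = 1472/32 = 46 fit TxDrthMask (bits 0-5),
 * matching the byte figures in the comment above. */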
426 
427 enum RxConfig_bits {
428  RxDrthMask = 0x3e,
429  RxMxdmaMask = 0x700000,
430  RxMxdma_512 = 0x0,
431  RxMxdma_4 = 0x100000,
432  RxMxdma_8 = 0x200000,
433  RxMxdma_16 = 0x300000,
434  RxMxdma_32 = 0x400000,
435  RxMxdma_64 = 0x500000,
436  RxMxdma_128 = 0x600000,
437  RxMxdma_256 = 0x700000,
438  RxAcceptLong = 0x8000000,
439  RxAcceptTx = 0x10000000,
440  RxAcceptRunt = 0x40000000,
441  RxAcceptErr = 0x80000000
442 };
443 #define RX_DRTH_VAL (128/8)
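/* Editorial note (not in the original source): the Rx drain threshold
 * is in 8-byte units (cf. "DRTH 0x10: start copying to memory if 128
 * bytes are in the fifo" in init_registers() below), so RX_DRTH_VAL =
 * 128/8 = 16 = 0x10, which already lies within RxDrthMask (0x3e,
 * bits 1-5) with no further shifting. */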
444 
445 enum ClkRun_bits {
446  PMEEnable = 0x100,
447  PMEStatus = 0x8000,
448 };
449 
450 enum WOLCmd_bits {
451  WakePhy = 0x1,
452  WakeUnicast = 0x2,
453  WakeMulticast = 0x4,
454  WakeBroadcast = 0x8,
455  WakeArp = 0x10,
456  WakePMatch0 = 0x20,
457  WakePMatch1 = 0x40,
458  WakePMatch2 = 0x80,
459  WakePMatch3 = 0x100,
460  WakeMagic = 0x200,
461  WakeMagicSecure = 0x400,
462  SecureHack = 0x100000,
463  WokePhy = 0x400000,
464  WokeUnicast = 0x800000,
465  WokeMulticast = 0x1000000,
466  WokeBroadcast = 0x2000000,
467  WokeArp = 0x4000000,
468  WokePMatch0 = 0x8000000,
469  WokePMatch1 = 0x10000000,
470  WokePMatch2 = 0x20000000,
471  WokePMatch3 = 0x40000000,
472  WokeMagic = 0x80000000,
474 };
475 
476 enum RxFilterAddr_bits {
477  RFCRAddressMask = 0x3ff,
478  AcceptMulticast = 0x00200000,
479  AcceptMyPhys = 0x08000000,
480  AcceptAllPhys = 0x10000000,
481  AcceptAllMulticast = 0x20000000,
482  AcceptBroadcast = 0x40000000,
483  RxFilterEnable = 0x80000000
484 };
485 
486 enum StatsCtrl_bits {
487  StatsWarn = 0x1,
488  StatsFreeze = 0x2,
489  StatsClear = 0x4,
490  StatsStrobe = 0x8,
491 };
492 
493 enum MIntrCtrl_bits {
494  MICRIntEn = 0x2,
495 };
496 
497 enum PhyCtrl_bits {
498  PhyAddrMask = 0x1f,
499 };
500 
501 #define PHY_ADDR_NONE 32
502 #define PHY_ADDR_INTERNAL 1
503 
504 /* values we might find in the silicon revision register */
505 #define SRR_DP83815_C 0x0302
506 #define SRR_DP83815_D 0x0403
507 #define SRR_DP83816_A4 0x0504
508 #define SRR_DP83816_A5 0x0505
509 
510 /* The Rx and Tx buffer descriptors. */
511 /* Note that using only 32 bit fields simplifies conversion to big-endian
512  architectures. */
513 struct netdev_desc {
514  __le32 next_desc;
515  __le32 cmd_status;
516  __le32 addr;
517  __le32 software_use;
518 };
519 
520 /* Bits in network_desc.status */
521 enum desc_status_bits {
522  DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
523  DescNoCRC=0x10000000, DescPktOK=0x08000000,
524  DescSizeMask=0xfff,
525
526  DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
527  DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
528  DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
529  DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
530 
531  DescRxAbort=0x04000000, DescRxOver=0x02000000,
532  DescRxDest=0x01800000, DescRxLong=0x00400000,
533  DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
534  DescRxCRC=0x00080000, DescRxAlign=0x00040000,
535  DescRxLoop=0x00020000, DesRxColl=0x00010000,
536 };
537 
538 struct netdev_private {
539  /* Descriptor rings first for alignment */
540  dma_addr_t ring_dma;
541  struct netdev_desc *rx_ring;
542  struct netdev_desc *tx_ring;
543  /* The addresses of receive-in-place skbuffs */
544  struct sk_buff *rx_skbuff[RX_RING_SIZE];
545  dma_addr_t rx_dma[RX_RING_SIZE];
546  /* address of a sent-in-place packet/buffer, for later free() */
547  struct sk_buff *tx_skbuff[TX_RING_SIZE];
548  dma_addr_t tx_dma[TX_RING_SIZE];
549  struct net_device *dev;
550  void __iomem *ioaddr;
551  struct napi_struct napi;
552  /* Media monitoring timer */
553  struct timer_list timer;
554  /* Frequently used values: keep some adjacent for cache effect */
555  struct pci_dev *pci_dev;
556  struct netdev_desc *rx_head_desc;
557  /* Producer/consumer ring indices */
558  unsigned int cur_rx, dirty_rx;
559  unsigned int cur_tx, dirty_tx;
560  /* Based on MTU+slack. */
561  unsigned int rx_buf_sz;
562  int oom;
563  /* Interrupt status */
564  u32 intr_status;
565  /* Do not touch the nic registers */
566  int hands_off;
567  /* Don't pay attention to the reported link state. */
568  int ignore_phy;
569  /* external phy that is used: only valid if dev->if_port != PORT_TP */
570  int mii;
571  int phy_addr_external;
572  unsigned int full_duplex;
573  /* Rx filter */
574  u32 cur_rx_mode;
575  u32 rx_filter[16];
576  /* FIFO and PCI burst thresholds */
577  u32 tx_config, rx_config;
578  /* original contents of ClkRun register */
579  u32 SavedClkRun;
580  /* silicon revision */
581  u32 srr;
582  /* expected DSPCFG value */
583  u16 dspcfg;
584  int dspcfg_workaround;
585  /* parms saved in ethtool format */
586  u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
587  u8 duplex; /* Duplex, half or full */
588  u8 autoneg; /* Autonegotiation enabled */
589  /* MII transceiver section */
590  u16 advertising; /* NWay media advertisement */
591  unsigned int iosize;
592  spinlock_t lock;
593  u32 msg_enable; /* debug message level */
594  /* EEPROM data */
595  int eeprom_size;
596 };
597 
598 static void move_int_phy(struct net_device *dev, int addr);
599 static int eeprom_read(void __iomem *ioaddr, int location);
600 static int mdio_read(struct net_device *dev, int reg);
601 static void mdio_write(struct net_device *dev, int reg, u16 data);
602 static void init_phy_fixup(struct net_device *dev);
603 static int miiport_read(struct net_device *dev, int phy_id, int reg);
604 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
605 static int find_mii(struct net_device *dev);
606 static void natsemi_reset(struct net_device *dev);
607 static void natsemi_reload_eeprom(struct net_device *dev);
608 static void natsemi_stop_rxtx(struct net_device *dev);
609 static int netdev_open(struct net_device *dev);
610 static void do_cable_magic(struct net_device *dev);
611 static void undo_cable_magic(struct net_device *dev);
612 static void check_link(struct net_device *dev);
613 static void netdev_timer(unsigned long data);
614 static void dump_ring(struct net_device *dev);
615 static void ns_tx_timeout(struct net_device *dev);
616 static int alloc_ring(struct net_device *dev);
617 static void refill_rx(struct net_device *dev);
618 static void init_ring(struct net_device *dev);
619 static void drain_tx(struct net_device *dev);
620 static void drain_ring(struct net_device *dev);
621 static void free_ring(struct net_device *dev);
622 static void reinit_ring(struct net_device *dev);
623 static void init_registers(struct net_device *dev);
624 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
625 static irqreturn_t intr_handler(int irq, void *dev_instance);
626 static void netdev_error(struct net_device *dev, int intr_status);
627 static int natsemi_poll(struct napi_struct *napi, int budget);
628 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
629 static void netdev_tx_done(struct net_device *dev);
630 static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
631 #ifdef CONFIG_NET_POLL_CONTROLLER
632 static void natsemi_poll_controller(struct net_device *dev);
633 #endif
634 static void __set_rx_mode(struct net_device *dev);
635 static void set_rx_mode(struct net_device *dev);
636 static void __get_stats(struct net_device *dev);
637 static struct net_device_stats *get_stats(struct net_device *dev);
638 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
639 static int netdev_set_wol(struct net_device *dev, u32 newval);
640 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
641 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
642 static int netdev_get_sopass(struct net_device *dev, u8 *data);
643 static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
644 static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
645 static void enable_wol_mode(struct net_device *dev, int enable_intr);
646 static int netdev_close(struct net_device *dev);
647 static int netdev_get_regs(struct net_device *dev, u8 *buf);
648 static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
649 static const struct ethtool_ops ethtool_ops;
650 
651 #define NATSEMI_ATTR(_name) \
652 static ssize_t natsemi_show_##_name(struct device *dev, \
653  struct device_attribute *attr, char *buf); \
654  static ssize_t natsemi_set_##_name(struct device *dev, \
655  struct device_attribute *attr, \
656  const char *buf, size_t count); \
657  static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)
658 
659 #define NATSEMI_CREATE_FILE(_dev, _name) \
660  device_create_file(&_dev->dev, &dev_attr_##_name)
661 #define NATSEMI_REMOVE_FILE(_dev, _name) \
662  device_remove_file(&_dev->dev, &dev_attr_##_name)
663 
664 NATSEMI_ATTR(dspcfg_workaround);
665 
666 static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
667  struct device_attribute *attr,
668  char *buf)
669 {
670  struct netdev_private *np = netdev_priv(to_net_dev(dev));
671 
672  return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
673 }
674 
675 static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
676  struct device_attribute *attr,
677  const char *buf, size_t count)
678 {
679  struct netdev_private *np = netdev_priv(to_net_dev(dev));
680  int new_setting;
681  unsigned long flags;
682 
683  /* Find out the new setting */
684  if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
685  new_setting = 1;
686  else if (!strncmp("off", buf, count - 1) ||
687  !strncmp("0", buf, count - 1))
688  new_setting = 0;
689  else
690  return count;
691 
692  spin_lock_irqsave(&np->lock, flags);
693 
694  np->dspcfg_workaround = new_setting;
695 
696  spin_unlock_irqrestore(&np->lock, flags);
697 
698  return count;
699 }
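/*
 * Editorial note (not in the original source): the DEVICE_ATTR() pair
 * above exposes dspcfg_workaround as a writable sysfs attribute, so the
 * workaround can be toggled at run time by writing "on"/"1" or
 * "off"/"0" to it, mirroring the dspcfg_workaround module parameter.
 * Note that the setter silently accepts unrecognized input (it still
 * returns count).
 */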
700 
701 static inline void __iomem *ns_ioaddr(struct net_device *dev)
702 {
703  struct netdev_private *np = netdev_priv(dev);
704 
705  return np->ioaddr;
706 }
707 
708 static inline void natsemi_irq_enable(struct net_device *dev)
709 {
710  writel(1, ns_ioaddr(dev) + IntrEnable);
711  readl(ns_ioaddr(dev) + IntrEnable);
712 }
713 
714 static inline void natsemi_irq_disable(struct net_device *dev)
715 {
716  writel(0, ns_ioaddr(dev) + IntrEnable);
717  readl(ns_ioaddr(dev) + IntrEnable);
718 }
719 
720 static void move_int_phy(struct net_device *dev, int addr)
721 {
722  struct netdev_private *np = netdev_priv(dev);
723  void __iomem *ioaddr = ns_ioaddr(dev);
724  int target = 31;
725 
726  /*
727  * The internal phy is visible on the external mii bus. Therefore we must
728  * move it away before we can send commands to an external phy.
729  * There are two addresses we must avoid:
730  * - the address on the external phy that is used for transmission.
731  * - the address that we want to access. User space can access phys
732  * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
733  * phy that is used for transmission.
734  */
735 
736  if (target == addr)
737  target--;
738  if (target == np->phy_addr_external)
739  target--;
740  writew(target, ioaddr + PhyCtrl);
741  readw(ioaddr + PhyCtrl);
742  udelay(1);
743 }
744 
745 static void __devinit natsemi_init_media (struct net_device *dev)
746 {
747  struct netdev_private *np = netdev_priv(dev);
748  u32 tmp;
749 
750  if (np->ignore_phy)
751  netif_carrier_on(dev);
752  else
753  netif_carrier_off(dev);
754 
755  /* get the initial settings from hardware */
756  tmp = mdio_read(dev, MII_BMCR);
757  np->speed = (tmp & BMCR_SPEED100)? SPEED_100 : SPEED_10;
758  np->duplex = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF;
759  np->autoneg = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
760  np->advertising = mdio_read(dev, MII_ADVERTISE);
761 
762  if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
763  netif_msg_probe(np)) {
764  printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
765  "10%s %s duplex.\n",
766  pci_name(np->pci_dev),
767  (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
768  "enabled, advertise" : "disabled, force",
769  (np->advertising &
770  (ADVERTISE_100FULL|ADVERTISE_100HALF)) ?
771  "0" : "",
772  (np->advertising &
773  (ADVERTISE_FULL|ADVERTISE_100FULL)) ?
774  "full" : "half");
775  }
776  if (netif_msg_probe(np))
777  printk(KERN_INFO
778  "natsemi %s: Transceiver status %#04x advertising %#04x.\n",
779  pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
780  np->advertising);
781 
782 }
783 
784 static const struct net_device_ops natsemi_netdev_ops = {
785  .ndo_open = netdev_open,
786  .ndo_stop = netdev_close,
787  .ndo_start_xmit = start_tx,
788  .ndo_get_stats = get_stats,
789  .ndo_set_rx_mode = set_rx_mode,
790  .ndo_change_mtu = natsemi_change_mtu,
791  .ndo_do_ioctl = netdev_ioctl,
792  .ndo_tx_timeout = ns_tx_timeout,
793  .ndo_set_mac_address = eth_mac_addr,
794  .ndo_validate_addr = eth_validate_addr,
795 #ifdef CONFIG_NET_POLL_CONTROLLER
796  .ndo_poll_controller = natsemi_poll_controller,
797 #endif
798 };
799 
800 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
801  const struct pci_device_id *ent)
802 {
803  struct net_device *dev;
804  struct netdev_private *np;
805  int i, option, irq, chip_idx = ent->driver_data;
806  static int find_cnt = -1;
807  resource_size_t iostart;
808  unsigned long iosize;
809  void __iomem *ioaddr;
810  const int pcibar = 1; /* PCI base address register */
811  int prev_eedata;
812  u32 tmp;
813 
814 /* when built into the kernel, we only print version if device is found */
815 #ifndef MODULE
816  static int printed_version;
817  if (!printed_version++)
818  printk(version);
819 #endif
820 
821  i = pci_enable_device(pdev);
822  if (i) return i;
823 
824  /* natsemi has a non-standard PM control register
825  * in PCI config space. Some boards apparently need
826  * to be brought to D0 in this manner.
827  */
828  pci_read_config_dword(pdev, PCIPM, &tmp);
829  if (tmp & PCI_PM_CTRL_STATE_MASK) {
830  /* D0 state, disable PME assertion */
831  u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
832  pci_write_config_dword(pdev, PCIPM, newtmp);
833  }
834 
835  find_cnt++;
836  iostart = pci_resource_start(pdev, pcibar);
837  iosize = pci_resource_len(pdev, pcibar);
838  irq = pdev->irq;
839 
840  pci_set_master(pdev);
841 
842  dev = alloc_etherdev(sizeof (struct netdev_private));
843  if (!dev)
844  return -ENOMEM;
845  SET_NETDEV_DEV(dev, &pdev->dev);
846 
847  i = pci_request_regions(pdev, DRV_NAME);
848  if (i)
849  goto err_pci_request_regions;
850 
851  ioaddr = ioremap(iostart, iosize);
852  if (!ioaddr) {
853  i = -ENOMEM;
854  goto err_ioremap;
855  }
856 
857  /* Work around the dropped serial bit. */
858  prev_eedata = eeprom_read(ioaddr, 6);
859  for (i = 0; i < 3; i++) {
860  int eedata = eeprom_read(ioaddr, i + 7);
861  dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
862  dev->dev_addr[i*2+1] = eedata >> 7;
863  prev_eedata = eedata;
864  }
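	/* Editorial note (not in the original source): decoding the loop
	 * above: each MAC address byte straddles a 16-bit EEPROM word
	 * boundary by one bit, so byte 2i is (word[6:0] << 1) |
	 * prev_word[15] and byte 2i+1 is word[14:7]. */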
865 
866  /* Store MAC Address in perm_addr */
867  memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
868 
869  np = netdev_priv(dev);
870  np->ioaddr = ioaddr;
871 
872  netif_napi_add(dev, &np->napi, natsemi_poll, 64);
873  np->dev = dev;
874 
875  np->pci_dev = pdev;
876  pci_set_drvdata(pdev, dev);
877  np->iosize = iosize;
878  spin_lock_init(&np->lock);
879  np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
880  np->hands_off = 0;
881  np->intr_status = 0;
882  np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
883  if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
884  np->ignore_phy = 1;
885  else
886  np->ignore_phy = 0;
887  np->dspcfg_workaround = dspcfg_workaround;
888
889  /* Initial port:
890  * - If configured to ignore the PHY set up for external.
891  * - If the nic was configured to use an external phy and if find_mii
892  * finds a phy: use external port, first phy that replies.
893  * - Otherwise: internal port.
894  * Note that the phy address for the internal phy doesn't matter:
895  * The address would be used to access a phy over the mii bus, but
896  * the internal phy is accessed through mapped registers.
897  */
898  if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
899  dev->if_port = PORT_MII;
900  else
901  dev->if_port = PORT_TP;
902  /* Reset the chip to erase previous misconfiguration. */
903  natsemi_reload_eeprom(dev);
904  natsemi_reset(dev);
905 
906  if (dev->if_port != PORT_TP) {
907  np->phy_addr_external = find_mii(dev);
908  /* If we're ignoring the PHY it doesn't matter if we can't
909  * find one. */
910  if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
911  dev->if_port = PORT_TP;
912  np->phy_addr_external = PHY_ADDR_INTERNAL;
913  }
914  } else {
915  np->phy_addr_external = PHY_ADDR_INTERNAL;
916  }
917 
918  option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
919  /* The lower four bits are the media type. */
920  if (option) {
921  if (option & 0x200)
922  np->full_duplex = 1;
923  if (option & 15)
924  printk(KERN_INFO
925  "natsemi %s: ignoring user supplied media type %d",
926  pci_name(np->pci_dev), option & 15);
927  }
928  if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
929  np->full_duplex = 1;
930 
931  dev->netdev_ops = &natsemi_netdev_ops;
932  dev->watchdog_timeo = TX_TIMEOUT;
933 
934  SET_ETHTOOL_OPS(dev, &ethtool_ops);
935
936  if (mtu)
937  dev->mtu = mtu;
938 
939  natsemi_init_media(dev);
940 
941  /* save the silicon revision for later querying */
942  np->srr = readl(ioaddr + SiliconRev);
943  if (netif_msg_hw(np))
944  printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
945  pci_name(np->pci_dev), np->srr);
946 
947  i = register_netdev(dev);
948  if (i)
949  goto err_register_netdev;
950  i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
951  if (i)
952  goto err_create_file;
953 
954  if (netif_msg_drv(np)) {
955  printk(KERN_INFO "natsemi %s: %s at %#08llx "
956  "(%s), %pM, IRQ %d",
957  dev->name, natsemi_pci_info[chip_idx].name,
958  (unsigned long long)iostart, pci_name(np->pci_dev),
959  dev->dev_addr, irq);
960  if (dev->if_port == PORT_TP)
961  printk(", port TP.\n");
962  else if (np->ignore_phy)
963  printk(", port MII, ignoring PHY\n");
964  else
965  printk(", port MII, phy ad %d.\n", np->phy_addr_external);
966  }
967  return 0;
968 
969  err_create_file:
970  unregister_netdev(dev);
971 
972  err_register_netdev:
973  iounmap(ioaddr);
974 
975  err_ioremap:
976  pci_release_regions(pdev);
977  pci_set_drvdata(pdev, NULL);
978 
979  err_pci_request_regions:
980  free_netdev(dev);
981  return i;
982 }
983 
984 
985 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
986  The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
987 
988 /* Delay between EEPROM clock transitions.
989  No extra delay is needed with 33MHz PCI, but future 66MHz access may need
990  a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
991  made udelay() unreliable.
992  The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
993  deprecated.
994 */
995 #define eeprom_delay(ee_addr) readl(ee_addr)
996 
997 #define EE_Write0 (EE_ChipSelect)
998 #define EE_Write1 (EE_ChipSelect | EE_DataIn)
999 
1000 /* The EEPROM commands include the always-set leading bit. */
1001 enum EEPROM_Cmds {
1002  EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
1003 };
1004 
1005 static int eeprom_read(void __iomem *addr, int location)
1006 {
1007  int i;
1008  int retval = 0;
1009  void __iomem *ee_addr = addr + EECtrl;
1010  int read_cmd = location | EE_ReadCmd;
1011 
1012  writel(EE_Write0, ee_addr);
1013 
1014  /* Shift the read command bits out. */
1015  for (i = 10; i >= 0; i--) {
1016  short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
1017  writel(dataval, ee_addr);
1018  eeprom_delay(ee_addr);
1019  writel(dataval | EE_ShiftClk, ee_addr);
1020  eeprom_delay(ee_addr);
1021  }
1022  writel(EE_ChipSelect, ee_addr);
1023  eeprom_delay(ee_addr);
1024 
1025  for (i = 0; i < 16; i++) {
1026  writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
1027  eeprom_delay(ee_addr);
1028  retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
1029  writel(EE_ChipSelect, ee_addr);
1030  eeprom_delay(ee_addr);
1031  }
1032 
1033  /* Terminate the EEPROM access. */
1034  writel(EE_Write0, ee_addr);
1035  writel(0, ee_addr);
1036  return retval;
1037 }
1038 
1039 /* MII transceiver control section.
1040  * The 83815 series has an internal transceiver, and we present the
1041  * internal management registers as if they were MII connected.
1042  * External Phy registers are referenced through the MII interface.
1043  */
1044 
1045 /* clock transitions >= 20ns (25MHz)
1046  * One readl should be good to PCI @ 100MHz
1047  */
1048 #define mii_delay(ioaddr) readl(ioaddr + EECtrl)
1049 
1050 static int mii_getbit (struct net_device *dev)
1051 {
1052  int data;
1053  void __iomem *ioaddr = ns_ioaddr(dev);
1054 
1055  writel(MII_ShiftClk, ioaddr + EECtrl);
1056  data = readl(ioaddr + EECtrl);
1057  writel(0, ioaddr + EECtrl);
1058  mii_delay(ioaddr);
1059  return (data & MII_Data)? 1 : 0;
1060 }
1061 
1062 static void mii_send_bits (struct net_device *dev, u32 data, int len)
1063 {
1064  u32 i;
1065  void __iomem *ioaddr = ns_ioaddr(dev);
1066 
1067  for (i = (1 << (len-1)); i; i >>= 1)
1068  {
1069  u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1070  writel(mdio_val, ioaddr + EECtrl);
1071  mii_delay(ioaddr);
1072  writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1073  mii_delay(ioaddr);
1074  }
1075  writel(0, ioaddr + EECtrl);
1076  mii_delay(ioaddr);
1077 }
1078 
1079 static int miiport_read(struct net_device *dev, int phy_id, int reg)
1080 {
1081  u32 cmd;
1082  int i;
1083  u32 retval = 0;
1084 
1085  /* Ensure sync */
1086  mii_send_bits (dev, 0xffffffff, 32);
1087  /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1088  /* ST,OP = 0110'b for read operation */
1089  cmd = (0x06 << 10) | (phy_id << 5) | reg;
1090  mii_send_bits (dev, cmd, 14);
1091  /* Turnaround */
1092  if (mii_getbit (dev))
1093  return 0;
1094  /* Read data */
1095  for (i = 0; i < 16; i++) {
1096  retval <<= 1;
1097  retval |= mii_getbit (dev);
1098  }
1099  /* End cycle */
1100  mii_getbit (dev);
1101  return retval;
1102 }
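/*
 * Editorial example (not in the original source): for phy_id 1 and
 * reg 1 (MII_BMSR), the 14 bits shifted out above are
 * (0x06 << 10) | (1 << 5) | 1 = 0x1821: start (01), read opcode (10),
 * the 5-bit phy address and the 5-bit register number; the turnaround
 * bit and 16 data bits are then clocked in through mii_getbit().
 */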
1103 
1104 static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1105 {
1106  u32 cmd;
1107 
1108  /* Ensure sync */
1109  mii_send_bits (dev, 0xffffffff, 32);
1110  /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1111  /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1112  cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
1113  mii_send_bits (dev, cmd, 32);
1114  /* End cycle */
1115  mii_getbit (dev);
1116 }
1117 
1118 static int mdio_read(struct net_device *dev, int reg)
1119 {
1120  struct netdev_private *np = netdev_priv(dev);
1121  void __iomem *ioaddr = ns_ioaddr(dev);
1122 
1123  /* The 83815 series has two ports:
1124  * - an internal transceiver
1125  * - an external mii bus
1126  */
1127  if (dev->if_port == PORT_TP)
1128  return readw(ioaddr+BasicControl+(reg<<2));
1129  else
1130  return miiport_read(dev, np->phy_addr_external, reg);
1131 }
1132 
1133 static void mdio_write(struct net_device *dev, int reg, u16 data)
1134 {
1135  struct netdev_private *np = netdev_priv(dev);
1136  void __iomem *ioaddr = ns_ioaddr(dev);
1137 
1138  /* The 83815 series has an internal transceiver; handle separately */
1139  if (dev->if_port == PORT_TP)
1140  writew(data, ioaddr+BasicControl+(reg<<2));
1141  else
1142  miiport_write(dev, np->phy_addr_external, reg, data);
1143 }
1144 
1145 static void init_phy_fixup(struct net_device *dev)
1146 {
1147  struct netdev_private *np = netdev_priv(dev);
1148  void __iomem *ioaddr = ns_ioaddr(dev);
1149  int i;
1150  u32 cfg;
1151  u16 tmp;
1152 
1153  /* restore stuff lost when power was out */
1154  tmp = mdio_read(dev, MII_BMCR);
1155  if (np->autoneg == AUTONEG_ENABLE) {
1156  /* renegotiate if something changed */
1157  if ((tmp & BMCR_ANENABLE) == 0 ||
1158  np->advertising != mdio_read(dev, MII_ADVERTISE))
1159  {
1160  /* turn on autonegotiation and force negotiation */
1161  tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
1162  mdio_write(dev, MII_ADVERTISE, np->advertising);
1163  }
1164  } else {
1165  /* turn off autonegotiation, set speed and duplex */
1166  tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1167  if (np->speed == SPEED_100)
1168  tmp |= BMCR_SPEED100;
1169  if (np->duplex == DUPLEX_FULL)
1170  tmp |= BMCR_FULLDPLX;
1171  /*
1172  * Note: there is no good way to inform the link partner
1173  * that our capabilities changed. The user has to unplug
1174  * and replug the network cable after some changes, e.g.
1175  * after switching from 10HD, autoneg off to 100 HD,
1176  * autoneg off.
1177  */
1178  }
1179  mdio_write(dev, MII_BMCR, tmp);
1180  readl(ioaddr + ChipConfig);
1181  udelay(1);
1182 
1183  /* find out what phy this is */
1184  np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1185  + mdio_read(dev, MII_PHYSID2);
1186 
1187  /* handle external phys here */
1188  switch (np->mii) {
1189  case PHYID_AM79C874:
1190  /* phy specific configuration for fibre/tp operation */
1191  tmp = mdio_read(dev, MII_MCTRL);
1192  tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
1193  if (dev->if_port == PORT_FIBRE)
1194  tmp |= MII_FX_SEL;
1195  else
1196  tmp |= MII_EN_SCRM;
1197  mdio_write(dev, MII_MCTRL, tmp);
1198  break;
1199  default:
1200  break;
1201  }
1202  cfg = readl(ioaddr + ChipConfig);
1203  if (cfg & CfgExtPhy)
1204  return;
1205 
1206  /* On page 78 of the spec, they recommend some settings for "optimum
1207  performance" to be done in sequence. These settings optimize some
1208  of the 100Mbit autodetection circuitry. They say we only want to
1209  do this for rev C of the chip, but engineers at NSC (Bradley
1210  Kennedy) recommends always setting them. If you don't, you get
1211  Kennedy) recommend always setting them. If you don't, you get
1212 
1213  It seems that the DSP needs a few usec to reinitialize after
1214  the start of the phy. Just retry writing these values until they
1215  stick.
1216  */
1217  for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1218 
1219  int dspcfg;
1220  writew(1, ioaddr + PGSEL);
1221  writew(PMDCSR_VAL, ioaddr + PMDCSR);
1222  writew(TSTDAT_VAL, ioaddr + TSTDAT);
1223  np->dspcfg = (np->srr <= SRR_DP83815_C)?
1224  DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
1225  writew(np->dspcfg, ioaddr + DSPCFG);
1226  writew(SDCFG_VAL, ioaddr + SDCFG);
1227  writew(0, ioaddr + PGSEL);
1228  readl(ioaddr + ChipConfig);
1229  udelay(10);
1230 
1231  writew(1, ioaddr + PGSEL);
1232  dspcfg = readw(ioaddr + DSPCFG);
1233  writew(0, ioaddr + PGSEL);
1234  if (np->dspcfg == dspcfg)
1235  break;
1236  }
1237 
1238  if (netif_msg_link(np)) {
1239  if (i==NATSEMI_HW_TIMEOUT) {
1240  printk(KERN_INFO
1241  "%s: DSPCFG mismatch after retrying for %d usec.\n",
1242  dev->name, i*10);
1243  } else {
1244  printk(KERN_INFO
1245  "%s: DSPCFG accepted after %d usec.\n",
1246  dev->name, i*10);
1247  }
1248  }
1249  /*
1250  * Enable PHY Specific event based interrupts. Link state change
1251  * and Auto-Negotiation Completion are among the affected.
1252  * Read the intr status to clear it (needed for wake events).
1253  */
1254  readw(ioaddr + MIntrStatus);
1255  writew(MICRIntEn, ioaddr + MIntrCtrl);
1256 }
1257 
1258 static int switch_port_external(struct net_device *dev)
1259 {
1260  struct netdev_private *np = netdev_priv(dev);
1261  void __iomem *ioaddr = ns_ioaddr(dev);
1262  u32 cfg;
1263 
1264  cfg = readl(ioaddr + ChipConfig);
1265  if (cfg & CfgExtPhy)
1266  return 0;
1267 
1268  if (netif_msg_link(np)) {
1269  printk(KERN_INFO "%s: switching to external transceiver.\n",
1270  dev->name);
1271  }
1272 
1273  /* 1) switch back to external phy */
1274  writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
1275  readl(ioaddr + ChipConfig);
1276  udelay(1);
1277 
1278  /* 2) reset the external phy: */
1279  /* resetting the external PHY has been known to cause a hub supplying
1280  * power over Ethernet to kill the power. We don't want to kill
1281  * power to this computer, so we avoid resetting the phy.
1282  */
1283 
1284  /* 3) reinit the phy fixup, it got lost during power down. */
1285  move_int_phy(dev, np->phy_addr_external);
1286  init_phy_fixup(dev);
1287 
1288  return 1;
1289 }
1290 
1291 static int switch_port_internal(struct net_device *dev)
1292 {
1293  struct netdev_private *np = netdev_priv(dev);
1294  void __iomem *ioaddr = ns_ioaddr(dev);
1295  int i;
1296  u32 cfg;
1297  u16 bmcr;
1298 
1299  cfg = readl(ioaddr + ChipConfig);
1300  if (!(cfg & CfgExtPhy))
1301  return 0;
1302 
1303  if (netif_msg_link(np)) {
1304  printk(KERN_INFO "%s: switching to internal transceiver.\n",
1305  dev->name);
1306  }
1307  /* 1) switch back to internal phy: */
1308  cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
1309  writel(cfg, ioaddr + ChipConfig);
1310  readl(ioaddr + ChipConfig);
1311  udelay(1);
1312 
1313  /* 2) reset the internal phy: */
1314  bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
1315  writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
1316  readl(ioaddr + ChipConfig);
1317  udelay(10);
1318  for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1319  bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
1320  if (!(bmcr & BMCR_RESET))
1321  break;
1322  udelay(10);
1323  }
1324  if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
1325  printk(KERN_WARNING
1326  "%s: phy reset did not complete in %d usec.\n",
1327  dev->name, i*10);
1328  }
1329  /* 3) reinit the phy fixup, it got lost during power down. */
1330  init_phy_fixup(dev);
1331 
1332  return 1;
1333 }
1334 
1335 /* Scan for a PHY on the external mii bus.
1336  * There are two tricky points:
1337  * - Do not scan while the internal phy is enabled. The internal phy will
1338  * crash: e.g. reads from the DSPCFG register will return odd values and
1339  * the nasty random phy reset code will reset the nic every few seconds.
1340  * - The internal phy must be moved around, an external phy could
1341  * have the same address as the internal phy.
1342  */
1343 static int find_mii(struct net_device *dev)
1344 {
1345  struct netdev_private *np = netdev_priv(dev);
1346  int tmp;
1347  int i;
1348  int did_switch;
1349 
1350  /* Switch to external phy */
1351  did_switch = switch_port_external(dev);
1352 
1353  /* Scan the possible phy addresses:
1354  *
1355  * PHY address 0 means that the phy is in isolate mode. Not yet
1356  * supported due to lack of test hardware. User space should
1357  * handle it through ethtool.
1358  */
1359  for (i = 1; i <= 31; i++) {
1360  move_int_phy(dev, i);
1361  tmp = miiport_read(dev, i, MII_BMSR);
1362  if (tmp != 0xffff && tmp != 0x0000) {
1363  /* found something! */
1364  np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1365  + mdio_read(dev, MII_PHYSID2);
1366  if (netif_msg_probe(np)) {
1367  printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
1368  pci_name(np->pci_dev), np->mii, i);
1369  }
1370  break;
1371  }
1372  }
1373  /* And switch back to internal phy: */
1374  if (did_switch)
1375  switch_port_internal(dev);
1376  return i;
1377 }
1378 
1379 /* CFG bits [13:16] [18:23] */
1380 #define CFG_RESET_SAVE 0xfde000
1381 /* WCSR bits [0:4] [9:10] */
1382 #define WCSR_RESET_SAVE 0x61f
1383 /* RFCR bits [20] [22] [27:31] */
1384 #define RFCR_RESET_SAVE 0xf8500000
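/* Editorial note (not in the original source): the masks agree with the
 * bit ranges in the comments: bits [13:16]|[18:23] = 0x1e000 | 0xfc0000
 * = 0xfde000; bits [0:4]|[9:10] = 0x1f | 0x600 = 0x61f; and bits
 * [20]|[22]|[27:31] = 0x100000 | 0x400000 | 0xf8000000 = 0xf8500000. */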
1385 
1386 static void natsemi_reset(struct net_device *dev)
1387 {
1388  int i;
1389  u32 cfg;
1390  u32 wcsr;
1391  u32 rfcr;
1392  u16 pmatch[3];
1393  u16 sopass[3];
1394  struct netdev_private *np = netdev_priv(dev);
1395  void __iomem *ioaddr = ns_ioaddr(dev);
1396 
1397  /*
1398  * Resetting the chip causes some registers to be lost.
1399  * Natsemi suggests NOT reloading the EEPROM while live, so instead
1400  * we save the state that would have been loaded from EEPROM
1401  * on a normal power-up (see the spec EEPROM map). This assumes
1402  * whoever calls this will follow up with init_registers() eventually.
1403  */
1404 
1405  /* CFG */
1406  cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
1407  /* WCSR */
1408  wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
1409  /* RFCR */
1410  rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
1411  /* PMATCH */
1412  for (i = 0; i < 3; i++) {
1413  writel(i*2, ioaddr + RxFilterAddr);
1414  pmatch[i] = readw(ioaddr + RxFilterData);
1415  }
1416  /* SOPAS */
1417  for (i = 0; i < 3; i++) {
1418  writel(0xa+(i*2), ioaddr + RxFilterAddr);
1419  sopass[i] = readw(ioaddr + RxFilterData);
1420  }
1421 
1422  /* now whack the chip */
1423  writel(ChipReset, ioaddr + ChipCmd);
1424  for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1425  if (!(readl(ioaddr + ChipCmd) & ChipReset))
1426  break;
1427  udelay(5);
1428  }
1429  if (i==NATSEMI_HW_TIMEOUT) {
1430  printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
1431  dev->name, i*5);
1432  } else if (netif_msg_hw(np)) {
1433  printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
1434  dev->name, i*5);
1435  }
1436 
1437  /* restore CFG */
1438  cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
1439  /* turn on external phy if it was selected */
1440  if (dev->if_port == PORT_TP)
1441  cfg &= ~(CfgExtPhy | CfgPhyDis);
1442  else
1443  cfg |= (CfgExtPhy | CfgPhyDis);
1444  writel(cfg, ioaddr + ChipConfig);
1445  /* restore WCSR */
1446  wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
1447  writel(wcsr, ioaddr + WOLCmd);
1448  /* read RFCR */
1449  rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
1450  /* restore PMATCH */
1451  for (i = 0; i < 3; i++) {
1452  writel(i*2, ioaddr + RxFilterAddr);
1453  writew(pmatch[i], ioaddr + RxFilterData);
1454  }
1455  for (i = 0; i < 3; i++) {
1456  writel(0xa+(i*2), ioaddr + RxFilterAddr);
1457  writew(sopass[i], ioaddr + RxFilterData);
1458  }
1459  /* restore RFCR */
1460  writel(rfcr, ioaddr + RxFilterAddr);
1461 }
1462 
1463 static void reset_rx(struct net_device *dev)
1464 {
1465  int i;
1466  struct netdev_private *np = netdev_priv(dev);
1467  void __iomem *ioaddr = ns_ioaddr(dev);
1468 
1469  np->intr_status &= ~RxResetDone;
1470 
1471  writel(RxReset, ioaddr + ChipCmd);
1472 
1473  for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1474  np->intr_status |= readl(ioaddr + IntrStatus);
1475  if (np->intr_status & RxResetDone)
1476  break;
1477  udelay(15);
1478  }
1479  if (i==NATSEMI_HW_TIMEOUT) {
1480  printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1481  dev->name, i*15);
1482  } else if (netif_msg_hw(np)) {
1483  printk(KERN_WARNING "%s: RX reset took %d usec.\n",
1484  dev->name, i*15);
1485  }
1486 }
1487 
1488 static void natsemi_reload_eeprom(struct net_device *dev)
1489 {
1490  struct netdev_private *np = netdev_priv(dev);
1491  void __iomem *ioaddr = ns_ioaddr(dev);
1492  int i;
1493 
1494  writel(EepromReload, ioaddr + PCIBusCfg);
1495  for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1496  udelay(50);
1497  if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
1498  break;
1499  }
1500  if (i==NATSEMI_HW_TIMEOUT) {
1501  printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
1502  pci_name(np->pci_dev), i*50);
1503  } else if (netif_msg_hw(np)) {
1504  printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
1505  pci_name(np->pci_dev), i*50);
1506  }
1507 }
1508 
1509 static void natsemi_stop_rxtx(struct net_device *dev)
1510 {
1511  void __iomem * ioaddr = ns_ioaddr(dev);
1512  struct netdev_private *np = netdev_priv(dev);
1513  int i;
1514 
1515  writel(RxOff | TxOff, ioaddr + ChipCmd);
1516  for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
1517  if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
1518  break;
1519  udelay(5);
1520  }
1521  if (i==NATSEMI_HW_TIMEOUT) {
1522  printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
1523  dev->name, i*5);
1524  } else if (netif_msg_hw(np)) {
1525  printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
1526  dev->name, i*5);
1527  }
1528 }
1529 
1530 static int netdev_open(struct net_device *dev)
1531 {
1532  struct netdev_private *np = netdev_priv(dev);
1533  void __iomem * ioaddr = ns_ioaddr(dev);
1534  const int irq = np->pci_dev->irq;
1535  int i;
1536 
1537  /* Reset the chip, just in case. */
1538  natsemi_reset(dev);
1539 
1540  i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1541  if (i) return i;
1542 
1543  if (netif_msg_ifup(np))
1544  printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1545  dev->name, irq);
1546  i = alloc_ring(dev);
1547  if (i < 0) {
1548  free_irq(irq, dev);
1549  return i;
1550  }
1551  napi_enable(&np->napi);
1552 
1553  init_ring(dev);
1554  spin_lock_irq(&np->lock);
1555  init_registers(dev);
1556  /* now set the MAC address according to dev->dev_addr */
1557  for (i = 0; i < 3; i++) {
1558  u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
1559 
1560  writel(i*2, ioaddr + RxFilterAddr);
1561  writew(mac, ioaddr + RxFilterData);
1562  }
1563  writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
1564  spin_unlock_irq(&np->lock);
1565 
1566  netif_start_queue(dev);
1567 
1568  if (netif_msg_ifup(np))
1569  printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
1570  dev->name, (int)readl(ioaddr + ChipCmd));
1571 
1572  /* Set the timer to check for link beat. */
1573  init_timer(&np->timer);
1574  np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
1575  np->timer.data = (unsigned long)dev;
1576  np->timer.function = netdev_timer; /* timer handler */
1577  add_timer(&np->timer);
1578 
1579  return 0;
1580 }
1581 
1582 static void do_cable_magic(struct net_device *dev)
1583 {
1584  struct netdev_private *np = netdev_priv(dev);
1585  void __iomem *ioaddr = ns_ioaddr(dev);
1586 
1587  if (dev->if_port != PORT_TP)
1588  return;
1589 
1590  if (np->srr >= SRR_DP83816_A5)
1591  return;
1592 
1593  /*
1594  * 100 MBit links with short cables can trip an issue with the chip.
1595  * The problem manifests as lots of CRC errors and/or flickering
1596  * activity LED while idle. This process is based on instructions
1597  * from engineers at National.
1598  */
1599  if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1600  u16 data;
1601 
1602  writew(1, ioaddr + PGSEL);
1603  /*
1604  * coefficient visibility should already be enabled via
1605  * DSPCFG | 0x1000
1606  */
1607  data = readw(ioaddr + TSTDAT) & 0xff;
1608  /*
1609  * the value must be negative, and within certain values
1610  * (these values all come from National)
1611  */
1612  if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1613  np = netdev_priv(dev);
1614 
1615  /* the bug has been triggered - fix the coefficient */
1616  writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1617  /* lock the value */
1618  data = readw(ioaddr + DSPCFG);
1619  np->dspcfg = data | DSPCFG_LOCK;
1620  writew(np->dspcfg, ioaddr + DSPCFG);
1621  }
1622  writew(0, ioaddr + PGSEL);
1623  }
1624 }
1625 
1626 static void undo_cable_magic(struct net_device *dev)
1627 {
1628  u16 data;
1629  struct netdev_private *np = netdev_priv(dev);
1630  void __iomem * ioaddr = ns_ioaddr(dev);
1631 
1632  if (dev->if_port != PORT_TP)
1633  return;
1634 
1635  if (np->srr >= SRR_DP83816_A5)
1636  return;
1637 
1638  writew(1, ioaddr + PGSEL);
1639  /* make sure the lock bit is clear */
1640  data = readw(ioaddr + DSPCFG);
1641  np->dspcfg = data & ~DSPCFG_LOCK;
1642  writew(np->dspcfg, ioaddr + DSPCFG);
1643  writew(0, ioaddr + PGSEL);
1644 }
1645 
1646 static void check_link(struct net_device *dev)
1647 {
1648  struct netdev_private *np = netdev_priv(dev);
1649  void __iomem * ioaddr = ns_ioaddr(dev);
1650  int duplex = np->duplex;
1651  u16 bmsr;
1652 
1653  /* If we are ignoring the PHY then don't try reading it. */
1654  if (np->ignore_phy)
1655  goto propagate_state;
1656 
1657  /* The link status field is latched: it remains low after a temporary
1658  * link failure until it's read. We need the current link status,
1659  * thus read twice.
1660  */
1661  mdio_read(dev, MII_BMSR);
1662  bmsr = mdio_read(dev, MII_BMSR);
1663 
1664  if (!(bmsr & BMSR_LSTATUS)) {
1665  if (netif_carrier_ok(dev)) {
1666  if (netif_msg_link(np))
1667  printk(KERN_NOTICE "%s: link down.\n",
1668  dev->name);
1669  netif_carrier_off(dev);
1670  undo_cable_magic(dev);
1671  }
1672  return;
1673  }
1674  if (!netif_carrier_ok(dev)) {
1675  if (netif_msg_link(np))
1676  printk(KERN_NOTICE "%s: link up.\n", dev->name);
1677  netif_carrier_on(dev);
1678  do_cable_magic(dev);
1679  }
1680 
1681  duplex = np->full_duplex;
1682  if (!duplex) {
1683  if (bmsr & BMSR_ANEGCOMPLETE) {
1684  int tmp = mii_nway_result(
1685  np->advertising & mdio_read(dev, MII_LPA));
1686  if (tmp == LPA_100FULL || tmp == LPA_10FULL)
1687  duplex = 1;
1688  } else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
1689  duplex = 1;
1690  }
1691 
1692 propagate_state:
1693  /* if duplex is set then bit 28 must be set, too */
1694  if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
1695  if (netif_msg_link(np))
1696  printk(KERN_INFO
1697  "%s: Setting %s-duplex based on negotiated "
1698  "link capability.\n", dev->name,
1699  duplex ? "full" : "half");
1700  if (duplex) {
1701  np->rx_config |= RxAcceptTx;
1702  np->tx_config |= TxCarrierIgn | TxHeartIgn;
1703  } else {
1704  np->rx_config &= ~RxAcceptTx;
1705  np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
1706  }
1707  writel(np->tx_config, ioaddr + TxConfig);
1708  writel(np->rx_config, ioaddr + RxConfig);
1709  }
1710 }
1711 
1712 static void init_registers(struct net_device *dev)
1713 {
1714  struct netdev_private *np = netdev_priv(dev);
1715  void __iomem * ioaddr = ns_ioaddr(dev);
1716 
1717  init_phy_fixup(dev);
1718 
1719  /* clear any interrupts that are pending, such as wake events */
1720  readl(ioaddr + IntrStatus);
1721 
1722  writel(np->ring_dma, ioaddr + RxRingPtr);
1723  writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
1724  ioaddr + TxRingPtr);
1725 
1726  /* Initialize other registers.
1727  * Configure the PCI bus bursts and FIFO thresholds.
1728  * Configure for standard, in-spec Ethernet.
1729  * Start with half-duplex. check_link will update
1730  * to the correct settings.
1731  */
1732 
1733  /* DRTH: 2: start tx if 64 bytes are in the fifo
1734  * FLTH: 0x10: refill with next packet if 512 bytes are free
1735  * MXDMA: 0: up to 256 byte bursts.
1736  * MXDMA must be <= FLTH
1737  * ECRETRY=1
1738  * ATP=1
1739  */
1740  np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
1741  TX_FLTH_VAL | TX_DRTH_VAL_START;
1742  writel(np->tx_config, ioaddr + TxConfig);
1743 
1744  /* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
1745  * MXDMA 0: up to 256 byte bursts
1746  */
1747  np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
1748  /* if receive ring now has bigger buffers than normal, enable jumbo */
1749  if (np->rx_buf_sz > NATSEMI_LONGPKT)
1750  np->rx_config |= RxAcceptLong;
1751 
1752  writel(np->rx_config, ioaddr + RxConfig);
1753 
1754  /* Disable PME:
1755  * The PME bit is initialized from the EEPROM contents.
1756  * PCI cards probably have PME disabled, but motherboard
1757  * implementations may have PME set to enable WakeOnLan.
1758  * With PME set the chip will scan incoming packets but
1759  * nothing will be written to memory. */
1760  np->SavedClkRun = readl(ioaddr + ClkRun);
1761  writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
1762  if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
1763  printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
1764  dev->name, readl(ioaddr + WOLCmd));
1765  }
1766 
1767  check_link(dev);
1768  __set_rx_mode(dev);
1769 
1770  /* Enable interrupts by setting the interrupt mask. */
1771  writel(DEFAULT_INTR, ioaddr + IntrMask);
1772  natsemi_irq_enable(dev);
1773 
1774  writel(RxOn | TxOn, ioaddr + ChipCmd);
1775  writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
1776 }
1777 
1778 /*
1779  * netdev_timer:
1780  * Purpose:
1781  * 1) check for link changes. Usually they are handled by the MII interrupt
1782  * but it doesn't hurt to check twice.
1783  * 2) check for sudden death of the NIC:
1784  * It seems that a reference set for this chip went out with incorrect info,
1785  * and there exist boards that aren't quite right. An unexpected voltage
1786  * drop can cause the PHY to get itself in a weird state (basically reset).
1787  * NOTE: this only seems to affect revC chips. The user can disable
1788  * this check via dspcfg_workaround sysfs option.
1789  * 3) check for death of the RX path due to OOM
1790  */
1791 static void netdev_timer(unsigned long data)
1792 {
1793  struct net_device *dev = (struct net_device *)data;
1794  struct netdev_private *np = netdev_priv(dev);
1795  void __iomem * ioaddr = ns_ioaddr(dev);
1796  int next_tick = NATSEMI_TIMER_FREQ;
1797  const int irq = np->pci_dev->irq;
1798 
1799  if (netif_msg_timer(np)) {
1800  /* DO NOT read the IntrStatus register,
1801  * a read clears any pending interrupts.
1802  */
1803  printk(KERN_DEBUG "%s: Media selection timer tick.\n",
1804  dev->name);
1805  }
1806 
1807  if (dev->if_port == PORT_TP) {
1808  u16 dspcfg;
1809 
1810  spin_lock_irq(&np->lock);
1811  /* check for a nasty random phy-reset - use dspcfg as a flag */
1812  writew(1, ioaddr+PGSEL);
1813  dspcfg = readw(ioaddr+DSPCFG);
1814  writew(0, ioaddr+PGSEL);
1815  if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
1816  if (!netif_queue_stopped(dev)) {
1817  spin_unlock_irq(&np->lock);
1818  if (netif_msg_drv(np))
1819  printk(KERN_NOTICE "%s: possible phy reset: "
1820  "re-initializing\n", dev->name);
1821  disable_irq(irq);
1822  spin_lock_irq(&np->lock);
1823  natsemi_stop_rxtx(dev);
1824  dump_ring(dev);
1825  reinit_ring(dev);
1826  init_registers(dev);
1827  spin_unlock_irq(&np->lock);
1828  enable_irq(irq);
1829  } else {
1830  /* hurry back */
1831  next_tick = HZ;
1832  spin_unlock_irq(&np->lock);
1833  }
1834  } else {
1835  /* init_registers() calls check_link() for the above case */
1836  check_link(dev);
1837  spin_unlock_irq(&np->lock);
1838  }
1839  } else {
1840  spin_lock_irq(&np->lock);
1841  check_link(dev);
1842  spin_unlock_irq(&np->lock);
1843  }
1844  if (np->oom) {
1845  disable_irq(irq);
1846  np->oom = 0;
1847  refill_rx(dev);
1848  enable_irq(irq);
1849  if (!np->oom) {
1850  writel(RxOn, ioaddr + ChipCmd);
1851  } else {
1852  next_tick = 1;
1853  }
1854  }
1855 
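 /* round_jiffies() batches the regular tick with other timers to reduce
  * wakeups; the 1-jiffy OOM retry below is deliberately left unrounded so
  * it fires as soon as possible. */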
1856  if (next_tick > 1)
1857  mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
1858  else
1859  mod_timer(&np->timer, jiffies + next_tick);
1860 }
1861 
1862 static void dump_ring(struct net_device *dev)
1863 {
1864  struct netdev_private *np = netdev_priv(dev);
1865 
1866  if (netif_msg_pktdata(np)) {
1867  int i;
1868  printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
1869  for (i = 0; i < TX_RING_SIZE; i++) {
1870  printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1871  i, np->tx_ring[i].next_desc,
1872  np->tx_ring[i].cmd_status,
1873  np->tx_ring[i].addr);
1874  }
1875  printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1876  for (i = 0; i < RX_RING_SIZE; i++) {
1877  printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1878  i, np->rx_ring[i].next_desc,
1879  np->rx_ring[i].cmd_status,
1880  np->rx_ring[i].addr);
1881  }
1882  }
1883 }
1884 
1885 static void ns_tx_timeout(struct net_device *dev)
1886 {
1887  struct netdev_private *np = netdev_priv(dev);
1888  void __iomem * ioaddr = ns_ioaddr(dev);
1889  const int irq = np->pci_dev->irq;
1890 
1891  disable_irq(irq);
1892  spin_lock_irq(&np->lock);
1893  if (!np->hands_off) {
1894  if (netif_msg_tx_err(np))
1895  printk(KERN_WARNING
1896  "%s: Transmit timed out, status %#08x,"
1897  " resetting...\n",
1898  dev->name, readl(ioaddr + IntrStatus));
1899  dump_ring(dev);
1900 
1901  natsemi_reset(dev);
1902  reinit_ring(dev);
1903  init_registers(dev);
1904  } else {
1905  printk(KERN_WARNING
1906  "%s: tx_timeout while in hands_off state?\n",
1907  dev->name);
1908  }
1909  spin_unlock_irq(&np->lock);
1910  enable_irq(irq);
1911 
1912  dev->trans_start = jiffies; /* prevent tx timeout */
1913  dev->stats.tx_errors++;
1914  netif_wake_queue(dev);
1915 }
1916 
1917 static int alloc_ring(struct net_device *dev)
1918 {
1919  struct netdev_private *np = netdev_priv(dev);
1920  np->rx_ring = pci_alloc_consistent(np->pci_dev,
1921  sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
1922  &np->ring_dma);
1923  if (!np->rx_ring)
1924  return -ENOMEM;
1925  np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1926  return 0;
1927 }
1928 
1929 static void refill_rx(struct net_device *dev)
1930 {
1931  struct netdev_private *np = netdev_priv(dev);
1932 
1933  /* Refill the Rx ring buffers. */
1934  for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1935  struct sk_buff *skb;
1936  int entry = np->dirty_rx % RX_RING_SIZE;
1937  if (np->rx_skbuff[entry] == NULL) {
1938  unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
1939  skb = netdev_alloc_skb(dev, buflen);
1940  np->rx_skbuff[entry] = skb;
1941  if (skb == NULL)
1942  break; /* Better luck next round. */
1943  np->rx_dma[entry] = pci_map_single(np->pci_dev,
1944  skb->data, buflen, PCI_DMA_FROMDEVICE);
1945  np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
1946  }
1947  np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
1948  }
1949  if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
1950  if (netif_msg_rx_err(np))
1951  printk(KERN_WARNING "%s: going OOM.\n", dev->name);
1952  np->oom = 1;
1953  }
1954 }
1955 
1956 static void set_bufsize(struct net_device *dev)
1957 {
1958  struct netdev_private *np = netdev_priv(dev);
1959  if (dev->mtu <= ETH_DATA_LEN)
1960  np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1961  else
1962  np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1963 }
1964 
1965 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1966 static void init_ring(struct net_device *dev)
1967 {
1968  struct netdev_private *np = netdev_priv(dev);
1969  int i;
1970 
1971  /* 1) TX ring */
1972  np->dirty_tx = np->cur_tx = 0;
1973  for (i = 0; i < TX_RING_SIZE; i++) {
1974  np->tx_skbuff[i] = NULL;
1975  np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
1976  +sizeof(struct netdev_desc)
1977  *((i+1)%TX_RING_SIZE+RX_RING_SIZE));
1978  np->tx_ring[i].cmd_status = 0;
1979  }
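 /* Descriptor chaining, worked through: Tx descriptor i links to entry
  * ((i+1) % TX_RING_SIZE) + RX_RING_SIZE of the shared DMA block. The
  * modulo wraps the last Tx descriptor back to the first, and the
  * RX_RING_SIZE term skips over the Rx ring that precedes the Tx ring. */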
1980 
1981  /* 2) RX ring */
1982  np->dirty_rx = 0;
1983  np->cur_rx = RX_RING_SIZE;
1984  np->oom = 0;
1985  set_bufsize(dev);
1986 
1987  np->rx_head_desc = &np->rx_ring[0];
1988 
1989  /* Please be careful before changing this loop - at least gcc-2.95.1
1990  * miscompiles it otherwise.
1991  */
1992  /* Initialize all Rx descriptors. */
1993  for (i = 0; i < RX_RING_SIZE; i++) {
1994  np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
1995  +sizeof(struct netdev_desc)
1996  *((i+1)%RX_RING_SIZE));
1997  np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
1998  np->rx_skbuff[i] = NULL;
1999  }
2000  refill_rx(dev);
2001  dump_ring(dev);
2002 }
2003 
2004 static void drain_tx(struct net_device *dev)
2005 {
2006  struct netdev_private *np = netdev_priv(dev);
2007  int i;
2008 
2009  for (i = 0; i < TX_RING_SIZE; i++) {
2010  if (np->tx_skbuff[i]) {
2011  pci_unmap_single(np->pci_dev,
2012  np->tx_dma[i], np->tx_skbuff[i]->len,
2013  PCI_DMA_TODEVICE);
2014  dev_kfree_skb(np->tx_skbuff[i]);
2015  dev->stats.tx_dropped++;
2016  }
2017  np->tx_skbuff[i] = NULL;
2018  }
2019 }
2020 
2021 static void drain_rx(struct net_device *dev)
2022 {
2023  struct netdev_private *np = netdev_priv(dev);
2024  unsigned int buflen = np->rx_buf_sz;
2025  int i;
2026 
2027  /* Free all the skbuffs in the Rx queue. */
2028  for (i = 0; i < RX_RING_SIZE; i++) {
2029  np->rx_ring[i].cmd_status = 0;
2030  np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2031  if (np->rx_skbuff[i]) {
2032  pci_unmap_single(np->pci_dev, np->rx_dma[i],
2033  buflen + NATSEMI_PADDING,
2034  PCI_DMA_FROMDEVICE);
2035  dev_kfree_skb(np->rx_skbuff[i]);
2036  }
2037  np->rx_skbuff[i] = NULL;
2038  }
2039 }
2040 
2041 static void drain_ring(struct net_device *dev)
2042 {
2043  drain_rx(dev);
2044  drain_tx(dev);
2045 }
2046 
2047 static void free_ring(struct net_device *dev)
2048 {
2049  struct netdev_private *np = netdev_priv(dev);
2050  pci_free_consistent(np->pci_dev,
2051  sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
2052  np->rx_ring, np->ring_dma);
2053 }
2054 
2055 static void reinit_rx(struct net_device *dev)
2056 {
2057  struct netdev_private *np = netdev_priv(dev);
2058  int i;
2059 
2060  /* RX Ring */
2061  np->dirty_rx = 0;
2062  np->cur_rx = RX_RING_SIZE;
2063  np->rx_head_desc = &np->rx_ring[0];
2064  /* Initialize all Rx descriptors. */
2065  for (i = 0; i < RX_RING_SIZE; i++)
2066  np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2067 
2068  refill_rx(dev);
2069 }
2070 
2071 static void reinit_ring(struct net_device *dev)
2072 {
2073  struct netdev_private *np = netdev_priv(dev);
2074  int i;
2075 
2076  /* drain TX ring */
2077  drain_tx(dev);
2078  np->dirty_tx = np->cur_tx = 0;
2079  for (i=0;i<TX_RING_SIZE;i++)
2080  np->tx_ring[i].cmd_status = 0;
2081 
2082  reinit_rx(dev);
2083 }
2084 
2085 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
2086 {
2087  struct netdev_private *np = netdev_priv(dev);
2088  void __iomem * ioaddr = ns_ioaddr(dev);
2089  unsigned entry;
2090  unsigned long flags;
2091 
2092  /* Note: Ordering is important here: set the field with the
2093  "ownership" bit last, and only then increment cur_tx. */
2094 
2095  /* Calculate the next Tx descriptor entry. */
2096  entry = np->cur_tx % TX_RING_SIZE;
2097 
2098  np->tx_skbuff[entry] = skb;
2099  np->tx_dma[entry] = pci_map_single(np->pci_dev,
2100  skb->data,skb->len, PCI_DMA_TODEVICE);
2101 
2102  np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
2103 
2104  spin_lock_irqsave(&np->lock, flags);
2105 
2106  if (!np->hands_off) {
2107  np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
2108  /* StrongARM: Explicitly cache flush np->tx_ring and
2109  * skb->data,skb->len. */
2110  wmb();
2111  np->cur_tx++;
2112  if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
2113  netdev_tx_done(dev);
2114  if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
2115  netif_stop_queue(dev);
2116  }
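 /* Note the slack in the queue-full test above: when the ring looks
  * full, finished packets are reclaimed first via netdev_tx_done(), and
  * the queue is stopped only if TX_QUEUE_LEN - 1 or more entries are
  * still outstanding afterwards. */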
2117  /* Wake the potentially-idle transmit channel. */
2118  writel(TxOn, ioaddr + ChipCmd);
2119  } else {
2120  dev_kfree_skb_irq(skb);
2121  dev->stats.tx_dropped++;
2122  }
2123  spin_unlock_irqrestore(&np->lock, flags);
2124 
2125  if (netif_msg_tx_queued(np)) {
2126  printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
2127  dev->name, np->cur_tx, entry);
2128  }
2129  return NETDEV_TX_OK;
2130 }
2131 
2132 static void netdev_tx_done(struct net_device *dev)
2133 {
2134  struct netdev_private *np = netdev_priv(dev);
2135 
2136  for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
2137  int entry = np->dirty_tx % TX_RING_SIZE;
2138  if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
2139  break;
2140  if (netif_msg_tx_done(np))
2141  printk(KERN_DEBUG
2142  "%s: tx frame #%d finished, status %#08x.\n",
2143  dev->name, np->dirty_tx,
2144  le32_to_cpu(np->tx_ring[entry].cmd_status));
2145  if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
2146  dev->stats.tx_packets++;
2147  dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
2148  } else { /* Various Tx errors */
2149  int tx_status =
2150  le32_to_cpu(np->tx_ring[entry].cmd_status);
2151  if (tx_status & (DescTxAbort|DescTxExcColl))
2152  dev->stats.tx_aborted_errors++;
2153  if (tx_status & DescTxFIFO)
2154  dev->stats.tx_fifo_errors++;
2155  if (tx_status & DescTxCarrier)
2156  dev->stats.tx_carrier_errors++;
2157  if (tx_status & DescTxOOWCol)
2158  dev->stats.tx_window_errors++;
2159  dev->stats.tx_errors++;
2160  }
2161  pci_unmap_single(np->pci_dev,np->tx_dma[entry],
2162  np->tx_skbuff[entry]->len,
2163  PCI_DMA_TODEVICE);
2164  /* Free the original skb. */
2165  dev_kfree_skb_irq(np->tx_skbuff[entry]);
2166  np->tx_skbuff[entry] = NULL;
2167  }
2168  if (netif_queue_stopped(dev) &&
2169  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
2170  /* The ring is no longer full, wake queue. */
2171  netif_wake_queue(dev);
2172  }
2173 }
2174 
2175 /* The interrupt handler doesn't actually handle interrupts itself; it
2176  * schedules a NAPI poll if there is anything to do. */
2177 static irqreturn_t intr_handler(int irq, void *dev_instance)
2178 {
2179  struct net_device *dev = dev_instance;
2180  struct netdev_private *np = netdev_priv(dev);
2181  void __iomem * ioaddr = ns_ioaddr(dev);
2182 
2183  /* Reading IntrStatus automatically acknowledges so don't do
2184  * that while interrupts are disabled (for example, while a
2185  * poll is scheduled). */
2186  if (np->hands_off || !readl(ioaddr + IntrEnable))
2187  return IRQ_NONE;
2188 
2189  np->intr_status = readl(ioaddr + IntrStatus);
2190 
2191  if (!np->intr_status)
2192  return IRQ_NONE;
2193 
2194  if (netif_msg_intr(np))
2195  printk(KERN_DEBUG
2196  "%s: Interrupt, status %#08x, mask %#08x.\n",
2197  dev->name, np->intr_status,
2198  readl(ioaddr + IntrMask));
2199 
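 /* Purely a performance hint: warm the cache with the Rx slot that the
  * NAPI poll scheduled below will examine first. */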
2200  prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2201 
2202  if (napi_schedule_prep(&np->napi)) {
2203  /* Disable interrupts and register for poll */
2204  natsemi_irq_disable(dev);
2205  __napi_schedule(&np->napi);
2206  } else
2207  printk(KERN_WARNING
2208  "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
2209  dev->name, np->intr_status,
2210  readl(ioaddr + IntrMask));
2211 
2212  return IRQ_HANDLED;
2213 }
2214 
2215 /* This is the NAPI poll routine. As well as the standard RX handling
2216  * it also handles all other interrupts that the chip might raise.
2217  */
2218 static int natsemi_poll(struct napi_struct *napi, int budget)
2219 {
2220  struct netdev_private *np = container_of(napi, struct netdev_private, napi);
2221  struct net_device *dev = np->dev;
2222  void __iomem * ioaddr = ns_ioaddr(dev);
2223  int work_done = 0;
2224 
2225  do {
2226  if (netif_msg_intr(np))
2227  printk(KERN_DEBUG
2228  "%s: Poll, status %#08x, mask %#08x.\n",
2229  dev->name, np->intr_status,
2230  readl(ioaddr + IntrMask));
2231 
2232  /* netdev_rx() may read IntrStatus again if the RX state
2233  * machine falls over, so do it first. */
2234  if (np->intr_status &
2235  (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
2236  IntrRxErr | IntrRxOverrun)) {
2237  netdev_rx(dev, &work_done, budget);
2238  }
2239 
2240  if (np->intr_status &
2241  (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
2242  spin_lock(&np->lock);
2243  netdev_tx_done(dev);
2244  spin_unlock(&np->lock);
2245  }
2246 
2247  /* Abnormal error summary/uncommon events handlers. */
2248  if (np->intr_status & IntrAbnormalSummary)
2249  netdev_error(dev, np->intr_status);
2250 
2251  if (work_done >= budget)
2252  return work_done;
2253 
2254  np->intr_status = readl(ioaddr + IntrStatus);
2255  } while (np->intr_status);
2256 
2257  napi_complete(napi);
2258 
2259  /* Reenable interrupts providing nothing is trying to shut
2260  * the chip down. */
2261  spin_lock(&np->lock);
2262  if (!np->hands_off)
2263  natsemi_irq_enable(dev);
2264  spin_unlock(&np->lock);
2265 
2266  return work_done;
2267 }
2268 
2269 /* This routine is logically part of the interrupt handler, but separated
2270  for clarity and better register allocation. */
2271 static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2272 {
2273  struct netdev_private *np = netdev_priv(dev);
2274  int entry = np->cur_rx % RX_RING_SIZE;
2275  int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
2276  s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2277  unsigned int buflen = np->rx_buf_sz;
2278  void __iomem * ioaddr = ns_ioaddr(dev);
2279 
2280  /* If the driver owns the next entry it's a new packet. Send it up. */
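 /* DescOwn is bit 31, so on the signed 32-bit status "desc_status < 0"
  * is a cheap test for the ownership bit, which the chip sets once it
  * has written a frame (or fragment) into the buffer. */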
2281  while (desc_status < 0) { /* e.g. & DescOwn */
2282  int pkt_len;
2283  if (netif_msg_rx_status(np))
2284  printk(KERN_DEBUG
2285  " netdev_rx() entry %d status was %#08x.\n",
2286  entry, desc_status);
2287  if (--boguscnt < 0)
2288  break;
2289 
2290  if (*work_done >= work_to_do)
2291  break;
2292 
2293  (*work_done)++;
2294 
2295  pkt_len = (desc_status & DescSizeMask) - 4;
2296  if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
2297  if (desc_status & DescMore) {
2298  unsigned long flags;
2299 
2300  if (netif_msg_rx_err(np))
2301  printk(KERN_WARNING
2302  "%s: Oversized(?) Ethernet "
2303  "frame spanned multiple "
2304  "buffers, entry %#08x "
2305  "status %#08x.\n", dev->name,
2306  np->cur_rx, desc_status);
2307  dev->stats.rx_length_errors++;
2308 
2309  /* The RX state machine has probably
2310  * locked up beneath us. Follow the
2311  * reset procedure documented in
2312  * AN-1287. */
2313 
2314  spin_lock_irqsave(&np->lock, flags);
2315  reset_rx(dev);
2316  reinit_rx(dev);
2317  writel(np->ring_dma, ioaddr + RxRingPtr);
2318  check_link(dev);
2319  spin_unlock_irqrestore(&np->lock, flags);
2320 
2321  /* We'll enable RX on exit from this
2322  * function. */
2323  break;
2324 
2325  } else {
2326  /* There was an error. */
2327  dev->stats.rx_errors++;
2328  if (desc_status & (DescRxAbort|DescRxOver))
2329  dev->stats.rx_over_errors++;
2330  if (desc_status & (DescRxLong|DescRxRunt))
2331  dev->stats.rx_length_errors++;
2332  if (desc_status & (DescRxInvalid|DescRxAlign))
2333  dev->stats.rx_frame_errors++;
2334  if (desc_status & DescRxCRC)
2335  dev->stats.rx_crc_errors++;
2336  }
2337  } else if (pkt_len > np->rx_buf_sz) {
2338  /* if this is the tail of a double buffer
2339  * packet, we've already counted the error
2340  * on the first part. Ignore the second half.
2341  */
2342  } else {
2343  struct sk_buff *skb;
2344  /* Omit CRC size. */
2345  /* Check if the packet is long enough to accept
2346  * without copying to a minimally-sized skbuff. */
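 /* rx_copybreak trades a copy for buffer reuse: frames below the
  * breakpoint are copied into a small fresh skb so the large ring buffer
  * stays mapped and is reused immediately; larger frames hand the ring
  * buffer itself up the stack and the slot is refilled later by
  * refill_rx(). */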
2347  if (pkt_len < rx_copybreak &&
2348  (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
2349  /* 16 byte align the IP header */
2350  skb_reserve(skb, RX_OFFSET);
2351  pci_dma_sync_single_for_cpu(np->pci_dev,
2352  np->rx_dma[entry],
2353  buflen,
2354  PCI_DMA_FROMDEVICE);
2355  skb_copy_to_linear_data(skb,
2356  np->rx_skbuff[entry]->data, pkt_len);
2357  skb_put(skb, pkt_len);
2358  pci_dma_sync_single_for_device(np->pci_dev,
2359  np->rx_dma[entry],
2360  buflen,
2361  PCI_DMA_FROMDEVICE);
2362  } else {
2363  pci_unmap_single(np->pci_dev, np->rx_dma[entry],
2364  buflen + NATSEMI_PADDING,
2365  PCI_DMA_FROMDEVICE);
2366  skb_put(skb = np->rx_skbuff[entry], pkt_len);
2367  np->rx_skbuff[entry] = NULL;
2368  }
2369  skb->protocol = eth_type_trans(skb, dev);
2370  netif_receive_skb(skb);
2371  dev->stats.rx_packets++;
2372  dev->stats.rx_bytes += pkt_len;
2373  }
2374  entry = (++np->cur_rx) % RX_RING_SIZE;
2375  np->rx_head_desc = &np->rx_ring[entry];
2376  desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2377  }
2378  refill_rx(dev);
2379 
2380  /* Restart Rx engine if stopped. */
2381  if (np->oom)
2382  mod_timer(&np->timer, jiffies + 1);
2383  else
2384  writel(RxOn, ioaddr + ChipCmd);
2385 }
2386 
2387 static void netdev_error(struct net_device *dev, int intr_status)
2388 {
2389  struct netdev_private *np = netdev_priv(dev);
2390  void __iomem * ioaddr = ns_ioaddr(dev);
2391 
2392  spin_lock(&np->lock);
2393  if (intr_status & LinkChange) {
2394  u16 lpa = mdio_read(dev, MII_LPA);
2395  if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
2396  netif_msg_link(np)) {
2397  printk(KERN_INFO
2398  "%s: Autonegotiation advertising"
2399  " %#04x partner %#04x.\n", dev->name,
2400  np->advertising, lpa);
2401  }
2402 
2403  /* read MII int status to clear the flag */
2404  readw(ioaddr + MIntrStatus);
2405  check_link(dev);
2406  }
2407  if (intr_status & StatsMax) {
2408  __get_stats(dev);
2409  }
2410  if (intr_status & IntrTxUnderrun) {
2411  if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
2412  np->tx_config += TX_DRTH_VAL_INC;
2413  if (netif_msg_tx_err(np))
2414  printk(KERN_NOTICE
2415  "%s: increased tx threshold, txcfg %#08x.\n",
2416  dev->name, np->tx_config);
2417  } else {
2418  if (netif_msg_tx_err(np))
2419  printk(KERN_NOTICE
2420  "%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
2421  dev->name, np->tx_config);
2422  }
2423  writel(np->tx_config, ioaddr + TxConfig);
2424  }
2425  if (intr_status & WOLPkt && netif_msg_wol(np)) {
2426  int wol_status = readl(ioaddr + WOLCmd);
2427  printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
2428  dev->name, wol_status);
2429  }
2430  if (intr_status & RxStatusFIFOOver) {
2431  if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
2432  printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
2433  dev->name);
2434  }
2435  dev->stats.rx_fifo_errors++;
2436  dev->stats.rx_errors++;
2437  }
2438  /* Hmmmmm, it's not clear how to recover from PCI faults. */
2439  if (intr_status & IntrPCIErr) {
2440  printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
2441  intr_status & IntrPCIErr);
2442  dev->stats.tx_fifo_errors++;
2443  dev->stats.tx_errors++;
2444  dev->stats.rx_fifo_errors++;
2445  dev->stats.rx_errors++;
2446  }
2447  spin_unlock(&np->lock);
2448 }
2449 
2450 static void __get_stats(struct net_device *dev)
2451 {
2452  void __iomem * ioaddr = ns_ioaddr(dev);
2453 
2454  /* The chip need only report frames it silently dropped. */
2455  dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2456  dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2457 }
2458 
2459 static struct net_device_stats *get_stats(struct net_device *dev)
2460 {
2461  struct netdev_private *np = netdev_priv(dev);
2462 
2463  /* The chip need only report frames it silently dropped. */
2464  spin_lock_irq(&np->lock);
2465  if (netif_running(dev) && !np->hands_off)
2466  __get_stats(dev);
2467  spin_unlock_irq(&np->lock);
2468 
2469  return &dev->stats;
2470 }
2471 
2472 #ifdef CONFIG_NET_POLL_CONTROLLER
2473 static void natsemi_poll_controller(struct net_device *dev)
2474 {
2475  struct netdev_private *np = netdev_priv(dev);
2476  const int irq = np->pci_dev->irq;
2477 
2478  disable_irq(irq);
2479  intr_handler(irq, dev);
2480  enable_irq(irq);
2481 }
2482 #endif
2483 
2484 #define HASH_TABLE 0x200
2485 static void __set_rx_mode(struct net_device *dev)
2486 {
2487  void __iomem * ioaddr = ns_ioaddr(dev);
2488  struct netdev_private *np = netdev_priv(dev);
2489  u8 mc_filter[64]; /* Multicast hash filter */
2490  u32 rx_mode;
2491 
2492  if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2493  rx_mode = RxFilterEnable | AcceptBroadcast
2494  | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2495  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2496  (dev->flags & IFF_ALLMULTI)) {
2497  rx_mode = RxFilterEnable | AcceptBroadcast
2498  | AcceptAllMulticast | AcceptMyPhys;
2499  } else {
2500  struct netdev_hw_addr *ha;
2501  int i;
2502 
2503  memset(mc_filter, 0, sizeof(mc_filter));
2504  netdev_for_each_mc_addr(ha, dev) {
2505  int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2506  mc_filter[b/8] |= (1 << (b & 0x07));
2507  }
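 /* Worked example with an illustrative CRC value: if ether_crc() of an
  * address were 0xDEADBEEF, then b = (0xDEADBEEF >> 23) & 0x1ff = 445,
  * so bit 5 (445 & 7) of mc_filter[55] (445 / 8) is set - one bit in the
  * chip's 512-entry hash table. */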
2508  rx_mode = RxFilterEnable | AcceptBroadcast
2509  | AcceptMulticast | AcceptMyPhys;
2510  for (i = 0; i < 64; i += 2) {
2511  writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2512  writel((mc_filter[i + 1] << 8) + mc_filter[i],
2513  ioaddr + RxFilterData);
2514  }
2515  }
2516  writel(rx_mode, ioaddr + RxFilterAddr);
2517  np->cur_rx_mode = rx_mode;
2518 }
2519 
2520 static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2521 {
2522  if (new_mtu < 64 || new_mtu > NATSEMI_RX_LIMIT-NATSEMI_HEADERS)
2523  return -EINVAL;
2524 
2525  dev->mtu = new_mtu;
2526 
2527  /* synchronized against open : rtnl_lock() held by caller */
2528  if (netif_running(dev)) {
2529  struct netdev_private *np = netdev_priv(dev);
2530  void __iomem * ioaddr = ns_ioaddr(dev);
2531  const int irq = np->pci_dev->irq;
2532 
2533  disable_irq(irq);
2534  spin_lock(&np->lock);
2535  /* stop engines */
2536  natsemi_stop_rxtx(dev);
2537  /* drain rx queue */
2538  drain_rx(dev);
2539  /* change buffers */
2540  set_bufsize(dev);
2541  reinit_rx(dev);
2542  writel(np->ring_dma, ioaddr + RxRingPtr);
2543  /* restart engines */
2544  writel(RxOn | TxOn, ioaddr + ChipCmd);
2545  spin_unlock(&np->lock);
2546  enable_irq(irq);
2547  }
2548  return 0;
2549 }
2550 
2551 static void set_rx_mode(struct net_device *dev)
2552 {
2553  struct netdev_private *np = netdev_priv(dev);
2554  spin_lock_irq(&np->lock);
2555  if (!np->hands_off)
2556  __set_rx_mode(dev);
2557  spin_unlock_irq(&np->lock);
2558 }
2559 
2560 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2561 {
2562  struct netdev_private *np = netdev_priv(dev);
2563  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2564  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2565  strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
2566 }
2567 
2568 static int get_regs_len(struct net_device *dev)
2569 {
2570  return NATSEMI_REGS_SIZE;
2571 }
2572 
2573 static int get_eeprom_len(struct net_device *dev)
2574 {
2575  struct netdev_private *np = netdev_priv(dev);
2576  return np->eeprom_size;
2577 }
2578 
2579 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2580 {
2581  struct netdev_private *np = netdev_priv(dev);
2582  spin_lock_irq(&np->lock);
2583  netdev_get_ecmd(dev, ecmd);
2584  spin_unlock_irq(&np->lock);
2585  return 0;
2586 }
2587 
2588 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2589 {
2590  struct netdev_private *np = netdev_priv(dev);
2591  int res;
2592  spin_lock_irq(&np->lock);
2593  res = netdev_set_ecmd(dev, ecmd);
2594  spin_unlock_irq(&np->lock);
2595  return res;
2596 }
2597 
2598 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2599 {
2600  struct netdev_private *np = netdev_priv(dev);
2601  spin_lock_irq(&np->lock);
2602  netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2603  netdev_get_sopass(dev, wol->sopass);
2604  spin_unlock_irq(&np->lock);
2605 }
2606 
2607 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2608 {
2609  struct netdev_private *np = netdev_priv(dev);
2610  int res;
2611  spin_lock_irq(&np->lock);
2612  netdev_set_wol(dev, wol->wolopts);
2613  res = netdev_set_sopass(dev, wol->sopass);
2614  spin_unlock_irq(&np->lock);
2615  return res;
2616 }
2617 
2618 static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2619 {
2620  struct netdev_private *np = netdev_priv(dev);
2621  regs->version = NATSEMI_REGS_VER;
2622  spin_lock_irq(&np->lock);
2623  netdev_get_regs(dev, buf);
2624  spin_unlock_irq(&np->lock);
2625 }
2626 
2627 static u32 get_msglevel(struct net_device *dev)
2628 {
2629  struct netdev_private *np = netdev_priv(dev);
2630  return np->msg_enable;
2631 }
2632 
2633 static void set_msglevel(struct net_device *dev, u32 val)
2634 {
2635  struct netdev_private *np = netdev_priv(dev);
2636  np->msg_enable = val;
2637 }
2638 
2639 static int nway_reset(struct net_device *dev)
2640 {
2641  int tmp;
2642  int r = -EINVAL;
2643  /* if autoneg is off, it's an error */
2644  tmp = mdio_read(dev, MII_BMCR);
2645  if (tmp & BMCR_ANENABLE) {
2646  tmp |= (BMCR_ANRESTART);
2647  mdio_write(dev, MII_BMCR, tmp);
2648  r = 0;
2649  }
2650  return r;
2651 }
2652 
2653 static u32 get_link(struct net_device *dev)
2654 {
2655  /* LSTATUS is latched low until a read - so read twice */
2656  mdio_read(dev, MII_BMSR);
2657  return (mdio_read(dev, MII_BMSR)&BMSR_LSTATUS) ? 1:0;
2658 }
2659 
2660 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2661 {
2662  struct netdev_private *np = netdev_priv(dev);
2663  u8 *eebuf;
2664  int res;
2665 
2666  eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
2667  if (!eebuf)
2668  return -ENOMEM;
2669 
2670  eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
2671  spin_lock_irq(&np->lock);
2672  res = netdev_get_eeprom(dev, eebuf);
2673  spin_unlock_irq(&np->lock);
2674  if (!res)
2675  memcpy(data, eebuf+eeprom->offset, eeprom->len);
2676  kfree(eebuf);
2677  return res;
2678 }
2679 
2680 static const struct ethtool_ops ethtool_ops = {
2681  .get_drvinfo = get_drvinfo,
2682  .get_regs_len = get_regs_len,
2683  .get_eeprom_len = get_eeprom_len,
2684  .get_settings = get_settings,
2685  .set_settings = set_settings,
2686  .get_wol = get_wol,
2687  .set_wol = set_wol,
2688  .get_regs = get_regs,
2689  .get_msglevel = get_msglevel,
2690  .set_msglevel = set_msglevel,
2691  .nway_reset = nway_reset,
2692  .get_link = get_link,
2693  .get_eeprom = get_eeprom,
2694 };
2695 
2696 static int netdev_set_wol(struct net_device *dev, u32 newval)
2697 {
2698  struct netdev_private *np = netdev_priv(dev);
2699  void __iomem * ioaddr = ns_ioaddr(dev);
2700  u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2701 
2702  /* translate to bitmasks this chip understands */
2703  if (newval & WAKE_PHY)
2704  data |= WakePhy;
2705  if (newval & WAKE_UCAST)
2706  data |= WakeUnicast;
2707  if (newval & WAKE_MCAST)
2708  data |= WakeMulticast;
2709  if (newval & WAKE_BCAST)
2710  data |= WakeBroadcast;
2711  if (newval & WAKE_ARP)
2712  data |= WakeArp;
2713  if (newval & WAKE_MAGIC)
2714  data |= WakeMagic;
2715  if (np->srr >= SRR_DP83815_D) {
2716  if (newval & WAKE_MAGICSECURE) {
2717  data |= WakeMagicSecure;
2718  }
2719  }
2720 
2721  writel(data, ioaddr + WOLCmd);
2722 
2723  return 0;
2724 }
2725 
2726 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2727 {
2728  struct netdev_private *np = netdev_priv(dev);
2729  void __iomem * ioaddr = ns_ioaddr(dev);
2730  u32 regval = readl(ioaddr + WOLCmd);
2731 
2732  *supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2733  | WAKE_ARP | WAKE_MAGIC);
2734 
2735  if (np->srr >= SRR_DP83815_D) {
2736  /* SOPASS works on revD and higher */
2737  *supported |= WAKE_MAGICSECURE;
2738  }
2739  *cur = 0;
2740 
2741  /* translate from chip bitmasks */
2742  if (regval & WakePhy)
2743  *cur |= WAKE_PHY;
2744  if (regval & WakeUnicast)
2745  *cur |= WAKE_UCAST;
2746  if (regval & WakeMulticast)
2747  *cur |= WAKE_MCAST;
2748  if (regval & WakeBroadcast)
2749  *cur |= WAKE_BCAST;
2750  if (regval & WakeArp)
2751  *cur |= WAKE_ARP;
2752  if (regval & WakeMagic)
2753  *cur |= WAKE_MAGIC;
2754  if (regval & WakeMagicSecure) {
2755  /* this can be on in revC, but it's broken */
2756  *cur |= WAKE_MAGICSECURE;
2757  }
2758 
2759  return 0;
2760 }
2761 
2762 static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2763 {
2764  struct netdev_private *np = netdev_priv(dev);
2765  void __iomem * ioaddr = ns_ioaddr(dev);
2766  u16 *sval = (u16 *)newval;
2767  u32 addr;
2768 
2769  if (np->srr < SRR_DP83815_D) {
2770  return 0;
2771  }
2772 
2773  /* enable writing to these registers by disabling the RX filter */
2774  addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2775  addr &= ~RxFilterEnable;
2776  writel(addr, ioaddr + RxFilterAddr);
2777 
2778  /* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
2779  writel(addr | 0xa, ioaddr + RxFilterAddr);
2780  writew(sval[0], ioaddr + RxFilterData);
2781 
2782  writel(addr | 0xc, ioaddr + RxFilterAddr);
2783  writew(sval[1], ioaddr + RxFilterData);
2784 
2785  writel(addr | 0xe, ioaddr + RxFilterAddr);
2786  writew(sval[2], ioaddr + RxFilterData);
2787 
2788  /* re-enable the RX filter */
2789  writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
2790 
2791  return 0;
2792 }
2793 
2794 static int netdev_get_sopass(struct net_device *dev, u8 *data)
2795 {
2796  struct netdev_private *np = netdev_priv(dev);
2797  void __iomem * ioaddr = ns_ioaddr(dev);
2798  u16 *sval = (u16 *)data;
2799  u32 addr;
2800 
2801  if (np->srr < SRR_DP83815_D) {
2802  sval[0] = sval[1] = sval[2] = 0;
2803  return 0;
2804  }
2805 
2806  /* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
2807  addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2808 
2809  writel(addr | 0xa, ioaddr + RxFilterAddr);
2810  sval[0] = readw(ioaddr + RxFilterData);
2811 
2812  writel(addr | 0xc, ioaddr + RxFilterAddr);
2813  sval[1] = readw(ioaddr + RxFilterData);
2814 
2815  writel(addr | 0xe, ioaddr + RxFilterAddr);
2816  sval[2] = readw(ioaddr + RxFilterData);
2817 
2818  writel(addr, ioaddr + RxFilterAddr);
2819 
2820  return 0;
2821 }
2822 
2823 static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2824 {
2825  struct netdev_private *np = netdev_priv(dev);
2826  u32 tmp;
2827 
2828  ecmd->port = dev->if_port;
2829  ethtool_cmd_speed_set(ecmd, np->speed);
2830  ecmd->duplex = np->duplex;
2831  ecmd->autoneg = np->autoneg;
2832  ecmd->advertising = 0;
2833  if (np->advertising & ADVERTISE_10HALF)
2834  ecmd->advertising |= ADVERTISED_10baseT_Half;
2835  if (np->advertising & ADVERTISE_10FULL)
2836  ecmd->advertising |= ADVERTISED_10baseT_Full;
2837  if (np->advertising & ADVERTISE_100HALF)
2838  ecmd->advertising |= ADVERTISED_100baseT_Half;
2839  if (np->advertising & ADVERTISE_100FULL)
2840  ecmd->advertising |= ADVERTISED_100baseT_Full;
2841  ecmd->supported = (SUPPORTED_Autoneg |
2842  SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2843  SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2844  SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
2845  ecmd->phy_address = np->phy_addr_external;
2846  /*
2847  * We intentionally report the phy address of the external
2848  * phy, even if the internal phy is used. This is necessary
2849  * to work around a deficiency of the ethtool interface:
2850  * It's only possible to query the settings of the active
2851  * port. Therefore
2852  * # ethtool -s ethX port mii
2853  * actually sends an ioctl to switch to port mii with the
2854  * settings that are used for the current active port.
2855  * If we reported a different phy address in this
2856  * command, then
2857  * # ethtool -s ethX port tp;ethtool -s ethX port mii
2858  * would unintentionally change the phy address.
2859  *
2860  * Fortunately the phy address doesn't matter with the
2861  * internal phy...
2862  */
2863 
2864  /* set information based on active port type */
2865  switch (ecmd->port) {
2866  default:
2867  case PORT_TP:
2868  ecmd->advertising |= ADVERTISED_TP;
2869  ecmd->transceiver = XCVR_INTERNAL;
2870  break;
2871  case PORT_MII:
2872  ecmd->advertising |= ADVERTISED_MII;
2873  ecmd->transceiver = XCVR_EXTERNAL;
2874  break;
2875  case PORT_FIBRE:
2876  ecmd->advertising |= ADVERTISED_FIBRE;
2877  ecmd->transceiver = XCVR_EXTERNAL;
2878  break;
2879  }
2880 
2881  /* if autonegotiation is on, try to return the active speed/duplex */
2882  if (ecmd->autoneg == AUTONEG_ENABLE) {
2883  ecmd->advertising |= ADVERTISED_Autoneg;
2884  tmp = mii_nway_result(
2885  np->advertising & mdio_read(dev, MII_LPA));
2886  if (tmp == LPA_100FULL || tmp == LPA_100HALF)
2887  ethtool_cmd_speed_set(ecmd, SPEED_100);
2888  else
2889  ethtool_cmd_speed_set(ecmd, SPEED_10);
2890  if (tmp == LPA_100FULL || tmp == LPA_10FULL)
2891  ecmd->duplex = DUPLEX_FULL;
2892  else
2893  ecmd->duplex = DUPLEX_HALF;
2894  }
2895 
2896  /* ignore maxtxpkt, maxrxpkt for now */
2897 
2898  return 0;
2899 }
2900 
2901 static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
2902 {
2903  struct netdev_private *np = netdev_priv(dev);
2904 
2905  if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
2906  return -EINVAL;
2907  if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
2908  return -EINVAL;
2909  if (ecmd->autoneg == AUTONEG_ENABLE) {
2910  if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
2911  ADVERTISED_10baseT_Full |
2912  ADVERTISED_100baseT_Half |
2913  ADVERTISED_100baseT_Full)) == 0) {
2914  return -EINVAL;
2915  }
2916  } else if (ecmd->autoneg == AUTONEG_DISABLE) {
2917  u32 speed = ethtool_cmd_speed(ecmd);
2918  if (speed != SPEED_10 && speed != SPEED_100)
2919  return -EINVAL;
2920  if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
2921  return -EINVAL;
2922  } else {
2923  return -EINVAL;
2924  }
2925 
2926  /*
2927  * If we're ignoring the PHY then autoneg and the internal
2928  * transceiver are really not going to work so don't let the
2929  * user select them.
2930  */
2931  if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
2932  ecmd->port == PORT_TP))
2933  return -EINVAL;
2934 
2935  /*
2936  * maxtxpkt, maxrxpkt: ignored for now.
2937  *
2938  * transceiver:
2939  * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
2940  * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
2941  * selects based on ecmd->port.
2942  *
2943  * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
2944  * phys that are connected to the mii bus. It's used to apply fibre
2945  * specific updates.
2946  */
2947 
2948  /* WHEW! now let's bang some bits */
2949 
2950  /* save the parms */
2951  dev->if_port = ecmd->port;
2952  np->autoneg = ecmd->autoneg;
2953  np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
2954  if (np->autoneg == AUTONEG_ENABLE) {
2955  /* advertise only what has been requested */
2956  np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
2957  if (ecmd->advertising & ADVERTISED_10baseT_Half)
2958  np->advertising |= ADVERTISE_10HALF;
2959  if (ecmd->advertising & ADVERTISED_10baseT_Full)
2960  np->advertising |= ADVERTISE_10FULL;
2961  if (ecmd->advertising & ADVERTISED_100baseT_Half)
2962  np->advertising |= ADVERTISE_100HALF;
2963  if (ecmd->advertising & ADVERTISED_100baseT_Full)
2964  np->advertising |= ADVERTISE_100FULL;
2965  } else {
2966  np->speed = ethtool_cmd_speed(ecmd);
2967  np->duplex = ecmd->duplex;
2968  /* user overriding the initial full duplex parm? */
2969  if (np->duplex == DUPLEX_HALF)
2970  np->full_duplex = 0;
2971  }
2972 
2973  /* get the right phy enabled */
2974  if (ecmd->port == PORT_TP)
2975  switch_port_internal(dev);
2976  else
2977  switch_port_external(dev);
2978 
2979  /* set parms and see how this affected our link status */
2980  init_phy_fixup(dev);
2981  check_link(dev);
2982  return 0;
2983 }
2984 
2985 static int netdev_get_regs(struct net_device *dev, u8 *buf)
2986 {
2987  int i;
2988  int j;
2989  u32 rfcr;
2990  u32 *rbuf = (u32 *)buf;
2991  void __iomem * ioaddr = ns_ioaddr(dev);
2992 
2993  /* read non-mii page 0 of registers */
2994  for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
2995  rbuf[i] = readl(ioaddr + i*4);
2996  }
2997 
2998  /* read current mii registers */
2999  for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
3000  rbuf[i] = mdio_read(dev, i & 0x1f);
3001 
3002  /* read only the 'magic' registers from page 1 */
3003  writew(1, ioaddr + PGSEL);
3004  rbuf[i++] = readw(ioaddr + PMDCSR);
3005  rbuf[i++] = readw(ioaddr + TSTDAT);
3006  rbuf[i++] = readw(ioaddr + DSPCFG);
3007  rbuf[i++] = readw(ioaddr + SDCFG);
3008  writew(0, ioaddr + PGSEL);
3009 
3010  /* read RFCR indexed registers */
3011  rfcr = readl(ioaddr + RxFilterAddr);
3012  for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
3013  writel(j*2, ioaddr + RxFilterAddr);
3014  rbuf[i++] = readw(ioaddr + RxFilterData);
3015  }
3016  writel(rfcr, ioaddr + RxFilterAddr);
3017 
3018  /* the interrupt status is clear-on-read - see if we missed any */
3019  if (rbuf[4] & rbuf[5]) {
3020  printk(KERN_WARNING
3021  "%s: shoot, we dropped an interrupt (%#08x)\n",
3022  dev->name, rbuf[4] & rbuf[5]);
3023  }
3024 
3025  return 0;
3026 }
3027 
3028 #define SWAP_BITS(x) ( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
3029  | (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9) \
3030  | (((x) & 0x0010) << 7) | (((x) & 0x0020) << 5) \
3031  | (((x) & 0x0040) << 3) | (((x) & 0x0080) << 1) \
3032  | (((x) & 0x0100) >> 1) | (((x) & 0x0200) >> 3) \
3033  | (((x) & 0x0400) >> 5) | (((x) & 0x0800) >> 7) \
3034  | (((x) & 0x1000) >> 9) | (((x) & 0x2000) >> 11) \
3035  | (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
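 /* SWAP_BITS mirrors a 16-bit word (bit k moves to bit 15-k), e.g.
  * SWAP_BITS(0x0001) == 0x8000 and SWAP_BITS(0x00ff) == 0xff00. It undoes
  * the "sane" ordering that eeprom_read() applies, so userland sees the
  * EEPROM's native bit-swapped layout. */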
3036 
3037 static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
3038 {
3039  int i;
3040  u16 *ebuf = (u16 *)buf;
3041  void __iomem * ioaddr = ns_ioaddr(dev);
3042  struct netdev_private *np = netdev_priv(dev);
3043 
3044  /* eeprom_read reads 16 bits, and indexes by 16 bits */
3045  for (i = 0; i < np->eeprom_size/2; i++) {
3046  ebuf[i] = eeprom_read(ioaddr, i);
3047  /* The EEPROM itself stores data bit-swapped, but eeprom_read
3048  * reads it back "sanely". So we swap it back here in order to
3049  * present it to userland as it is stored. */
3050  ebuf[i] = SWAP_BITS(ebuf[i]);
3051  }
3052  return 0;
3053 }
3054 
3055 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3056 {
3057  struct mii_ioctl_data *data = if_mii(rq);
3058  struct netdev_private *np = netdev_priv(dev);
3059 
3060  switch(cmd) {
3061  case SIOCGMIIPHY: /* Get address of MII PHY in use. */
3062  data->phy_id = np->phy_addr_external;
3063  /* Fall Through */
3064 
3065  case SIOCGMIIREG: /* Read MII PHY register. */
3066  /* The phy_id is not enough to uniquely identify
3067  * the intended target. Therefore the command is sent to
3068  * the given mii on the current port.
3069  */
3070  if (dev->if_port == PORT_TP) {
3071  if ((data->phy_id & 0x1f) == np->phy_addr_external)
3072  data->val_out = mdio_read(dev,
3073  data->reg_num & 0x1f);
3074  else
3075  data->val_out = 0;
3076  } else {
3077  move_int_phy(dev, data->phy_id & 0x1f);
3078  data->val_out = miiport_read(dev, data->phy_id & 0x1f,
3079  data->reg_num & 0x1f);
3080  }
3081  return 0;
3082 
3083  case SIOCSMIIREG: /* Write MII PHY register. */
3084  if (dev->if_port == PORT_TP) {
3085  if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3086  if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3087  np->advertising = data->val_in;
3088  mdio_write(dev, data->reg_num & 0x1f,
3089  data->val_in);
3090  }
3091  } else {
3092  if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3093  if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3094  np->advertising = data->val_in;
3095  }
3096  move_int_phy(dev, data->phy_id & 0x1f);
3097  miiport_write(dev, data->phy_id & 0x1f,
3098  data->reg_num & 0x1f,
3099  data->val_in);
3100  }
3101  return 0;
3102  default:
3103  return -EOPNOTSUPP;
3104  }
3105 }
3106 
3107 static void enable_wol_mode(struct net_device *dev, int enable_intr)
3108 {
3109  void __iomem * ioaddr = ns_ioaddr(dev);
3110  struct netdev_private *np = netdev_priv(dev);
3111 
3112  if (netif_msg_wol(np))
3113  printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
3114  dev->name);
3115 
3116  /* For WOL we must restart the rx process in silent mode.
3117  * Write NULL to the RxRingPtr. Only possible if
3118  * rx process is stopped
3119  */
3120  writel(0, ioaddr + RxRingPtr);
3121 
3122  /* read WoL status to clear */
3123  readl(ioaddr + WOLCmd);
3124 
3125  /* PME on, clear status */
3126  writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);
3127 
3128  /* and restart the rx process */
3129  writel(RxOn, ioaddr + ChipCmd);
3130 
3131  if (enable_intr) {
3132  /* enable the WOL interrupt.
3133  * Could be used to send a netlink message.
3134  */
3135  writel(WOLPkt | LinkChange, ioaddr + IntrMask);
3136  natsemi_irq_enable(dev);
3137  }
3138 }
3139 
3140 static int netdev_close(struct net_device *dev)
3141 {
3142  void __iomem * ioaddr = ns_ioaddr(dev);
3143  struct netdev_private *np = netdev_priv(dev);
3144  const int irq = np->pci_dev->irq;
3145 
3146  if (netif_msg_ifdown(np))
3147  printk(KERN_DEBUG
3148  "%s: Shutting down ethercard, status was %#04x.\n",
3149  dev->name, (int)readl(ioaddr + ChipCmd));
3150  if (netif_msg_pktdata(np))
3151  printk(KERN_DEBUG
3152  "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
3153  dev->name, np->cur_tx, np->dirty_tx,
3154  np->cur_rx, np->dirty_rx);
3155 
3156  napi_disable(&np->napi);
3157 
3158  /*
3159  * FIXME: what if someone tries to close a device
3160  * that is suspended?
3161  * Should we reenable the nic to switch to
3162  * the final WOL settings?
3163  */
3164 
3165  del_timer_sync(&np->timer);
3166  disable_irq(irq);
3167  spin_lock_irq(&np->lock);
3168  natsemi_irq_disable(dev);
3169  np->hands_off = 1;
3170  spin_unlock_irq(&np->lock);
3171  enable_irq(irq);
3172 
3173  free_irq(irq, dev);
3174 
3175  /* Interrupt disabled, interrupt handler released,
3176  * queue stopped, timer deleted, rtnl_lock held
3177  * All async codepaths that access the driver are disabled.
3178  */
3179  spin_lock_irq(&np->lock);
3180  np->hands_off = 0;
3181  readl(ioaddr + IntrMask);
3182  readw(ioaddr + MIntrStatus);
3183 
3184  /* Freeze Stats */
3185  writel(StatsFreeze, ioaddr + StatsCtrl);
3186 
3187  /* Stop the chip's Tx and Rx processes. */
3188  natsemi_stop_rxtx(dev);
3189 
3190  __get_stats(dev);
3191  spin_unlock_irq(&np->lock);
3192 
3193  /* clear the carrier last - an interrupt could reenable it otherwise */
3194  netif_carrier_off(dev);
3195  netif_stop_queue(dev);
3196 
3197  dump_ring(dev);
3198  drain_ring(dev);
3199  free_ring(dev);
3200 
3201  {
3202  u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3203  if (wol) {
3204  /* restart the NIC in WOL mode.
3205  * The nic must be stopped for this.
3206  */
3207  enable_wol_mode(dev, 0);
3208  } else {
3209  /* Restore PME enable bit unmolested */
3210  writel(np->SavedClkRun, ioaddr + ClkRun);
3211  }
3212  }
3213  return 0;
3214 }
3215 
3216 
3217 static void __devexit natsemi_remove1 (struct pci_dev *pdev)
3218 {
3219  struct net_device *dev = pci_get_drvdata(pdev);
3220  void __iomem * ioaddr = ns_ioaddr(dev);
3221 
3222  NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
3223  unregister_netdev (dev);
3224  pci_release_regions (pdev);
3225  iounmap(ioaddr);
3226  free_netdev (dev);
3227  pci_set_drvdata(pdev, NULL);
3228 }
3229 
3230 #ifdef CONFIG_PM
3231 
3232 /*
3233  * The ns83815 chip doesn't have explicit RxStop bits.
3234  * Kicking the Rx or Tx process for a new packet reenables the Rx process
3235  * of the nic, thus this function must be very careful:
3236  *
3237  * suspend/resume synchronization:
3238  * entry points:
3239  * netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3240  * start_tx, ns_tx_timeout
3241  *
3242  * No function accesses the hardware without checking np->hands_off.
3243  * the check occurs under spin_lock_irq(&np->lock);
3244  * exceptions:
3245  * * netdev_ioctl: noncritical access.
3246  * * netdev_open: cannot happen due to the device_detach
3247  * * netdev_close: doesn't hurt.
3248  * * netdev_timer: timer stopped by natsemi_suspend.
3249  * * intr_handler: doesn't acquire the spinlock. suspend calls
3250  * disable_irq() to enforce synchronization.
3251  * * natsemi_poll: checks before reenabling interrupts. suspend
3252  * sets hands_off, disables interrupts and then waits with
3253  * napi_disable().
3254  *
3255  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3256  */
3257 
3258 static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3259 {
3260  struct net_device *dev = pci_get_drvdata (pdev);
3261  struct netdev_private *np = netdev_priv(dev);
3262  void __iomem * ioaddr = ns_ioaddr(dev);
3263 
3264  rtnl_lock();
3265  if (netif_running (dev)) {
3266  const int irq = np->pci_dev->irq;
3267 
3268  del_timer_sync(&np->timer);
3269 
3270  disable_irq(irq);
3271  spin_lock_irq(&np->lock);
3272 
3273  natsemi_irq_disable(dev);
3274  np->hands_off = 1;
3275  natsemi_stop_rxtx(dev);
3276  netif_stop_queue(dev);
3277 
3278  spin_unlock_irq(&np->lock);
3279  enable_irq(irq);
3280 
3281  napi_disable(&np->napi);
3282 
3283  /* Update the error counts. */
3284  __get_stats(dev);
3285 
3286  /* pci_power_off(pdev, -1); */
3287  drain_ring(dev);
3288  {
3289  u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3290  /* Restore PME enable bit */
3291  if (wol) {
3292  /* restart the NIC in WOL mode.
3293  * The nic must be stopped for this.
3294  * FIXME: use the WOL interrupt
3295  */
3296  enable_wol_mode(dev, 0);
3297  } else {
3298  /* Restore PME enable bit unmolested */
3299  writel(np->SavedClkRun, ioaddr + ClkRun);
3300  }
3301  }
3302  }
3303  netif_device_detach(dev);
3304  rtnl_unlock();
3305  return 0;
3306 }
3307 
3308 
3309 static int natsemi_resume (struct pci_dev *pdev)
3310 {
3311  struct net_device *dev = pci_get_drvdata (pdev);
3312  struct netdev_private *np = netdev_priv(dev);
3313  int ret = 0;
3314 
3315  rtnl_lock();
3316  if (netif_device_present(dev))
3317  goto out;
3318  if (netif_running(dev)) {
3319  const int irq = np->pci_dev->irq;
3320 
3321  BUG_ON(!np->hands_off);
3322  ret = pci_enable_device(pdev);
3323  if (ret < 0) {
3324  dev_err(&pdev->dev,
3325  "pci_enable_device() failed: %d\n", ret);
3326  goto out;
3327  }
3328  /* pci_power_on(pdev); */
3329 
3330  napi_enable(&np->napi);
3331 
3332  natsemi_reset(dev);
3333  init_ring(dev);
3334  disable_irq(irq);
3335  spin_lock_irq(&np->lock);
3336  np->hands_off = 0;
3337  init_registers(dev);
3338  netif_device_attach(dev);
3339  spin_unlock_irq(&np->lock);
3340  enable_irq(irq);
3341 
3342  mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3343  }
3344  netif_device_attach(dev);
3345 out:
3346  rtnl_unlock();
3347  return ret;
3348 }
3349 
3350 #endif /* CONFIG_PM */
3351 
3352 static struct pci_driver natsemi_driver = {
3353  .name = DRV_NAME,
3354  .id_table = natsemi_pci_tbl,
3355  .probe = natsemi_probe1,
3356  .remove = __devexit_p(natsemi_remove1),
3357 #ifdef CONFIG_PM
3358  .suspend = natsemi_suspend,
3359  .resume = natsemi_resume,
3360 #endif
3361 };
3362 
3363 static int __init natsemi_init_mod (void)
3364 {
3365 /* when a module, this is printed whether or not devices are found in probe */
3366 #ifdef MODULE
3367  printk(version);
3368 #endif
3369 
3370  return pci_register_driver(&natsemi_driver);
3371 }
3372 
3373 static void __exit natsemi_exit_mod (void)
3374 {
3375  pci_unregister_driver (&natsemi_driver);
3376 }
3377 
3378 module_init(natsemi_init_mod);
3379 module_exit(natsemi_exit_mod);
3380