Linux Kernel 3.7.1
sundance.c
1 /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2 /*
3  Written 1999-2000 by Donald Becker.
4 
5  This software may be used and distributed according to the terms of
6  the GNU General Public License (GPL), incorporated herein by reference.
7  Drivers based on or derived from this code fall under the GPL and must
8  retain the authorship, copyright and license notice. This file is not
9  a complete program and may only be used when the entire operating
10  system is licensed under the GPL.
11 
12  The author may be reached as [email protected], or C/O
13  Scyld Computing Corporation
14  410 Severn Ave., Suite 210
15  Annapolis MD 21403
16 
17  Support and updates available at
18  http://www.scyld.com/network/sundance.html
19  [link no longer provides useful info -jgarzik]
20  Archives of the mailing list are still available at
21  http://www.beowulf.org/pipermail/netdrivers/
22 
23 */
24 
25 #define DRV_NAME "sundance"
26 #define DRV_VERSION "1.2"
27 #define DRV_RELDATE "11-Sep-2006"
28 
29 
30 /* The user-configurable values.
31  These may be modified when a driver module is loaded.*/
32 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
33 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
34  Typical is a 64 element hash table based on the Ethernet CRC. */
35 static const int multicast_filter_limit = 32;
36 
37 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
38  Setting to > 1518 effectively disables this feature.
39  This chip can receive into offset buffers, so the Alpha does not
40  need a copy-align. */
41 static int rx_copybreak;
42 static int flowctrl=1;
43 
44 /* media[] specifies the media type the NIC operates at.
45  autosense Autosensing active media.
46  10mbps_hd 10Mbps half duplex.
47  10mbps_fd 10Mbps full duplex.
48  100mbps_hd 100Mbps half duplex.
49  100mbps_fd 100Mbps full duplex.
50  0 Autosensing active media.
51  1 10Mbps half duplex.
52  2 10Mbps full duplex.
53  3 100Mbps half duplex.
54  4 100Mbps full duplex.
55 */
56 #define MAX_UNITS 8
57 static char *media[MAX_UNITS];
58 
59 
60 /* Operational parameters that are set at compile time. */
61 
62 /* Keep the ring sizes a power of two for compile efficiency.
63  The compiler will convert <unsigned>'%'<2^N> into a bit mask.
64  Making the Tx ring too large decreases the effectiveness of channel
65  bonding and packet priority, and more than 128 requires modifying the
66  Tx error recovery.
67  Large receive rings merely waste memory. */
68 #define TX_RING_SIZE 32
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
70 #define RX_RING_SIZE 64
71 #define RX_BUDGET 32
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
73 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
74 
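The ring-size comment above leans on a standard compiler property: with an unsigned counter and a power-of-two ring size, the '%' used throughout this driver (for example np->cur_tx % TX_RING_SIZE in start_tx()) reduces to a single AND with (SIZE - 1), and free-running cur/dirty counters still give a correct in-flight count across wrap. A minimal stand-alone sketch, not part of the driver, illustrating that equivalence:

	#include <assert.h>

	#define RING_SIZE 32	/* power of two, as the comment above requires */

	/* The counters run freely; only the low bits index the ring. */
	static unsigned int ring_index(unsigned int counter)
	{
		return counter % RING_SIZE;	/* compiles down to: counter & (RING_SIZE - 1) */
	}

	int main(void)
	{
		unsigned int cur = 4294967290u;	/* near UINT_MAX, about to wrap */
		unsigned int dirty = cur - 5;

		assert(ring_index(cur) == (cur & (RING_SIZE - 1)));
		assert(cur - dirty == 5);	/* occupancy is still correct across the wrap */
		return 0;
	}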
75 /* Operational parameters that usually are not changed. */
76 /* Time in jiffies before concluding the transmitter is hung. */
77 #define TX_TIMEOUT (4*HZ)
78 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
79 
80 /* Include files, designed to support most kernel versions 2.0.0 and later. */
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
84 #include <linux/timer.h>
85 #include <linux/errno.h>
86 #include <linux/ioport.h>
87 #include <linux/interrupt.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
90 #include <linux/etherdevice.h>
91 #include <linux/skbuff.h>
92 #include <linux/init.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h> /* Processor type for cache alignment. */
96 #include <asm/io.h>
97 #include <linux/delay.h>
98 #include <linux/spinlock.h>
99 #include <linux/dma-mapping.h>
100 #include <linux/crc32.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
103 
104 /* These identify the driver base version and may not be removed. */
105 static const char version[] __devinitconst =
106  KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107  " Written by Donald Becker\n";
108 
109 MODULE_AUTHOR("Donald Becker <[email protected]>");
110 MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
111 MODULE_LICENSE("GPL");
112 
113 module_param(debug, int, 0);
114 module_param(rx_copybreak, int, 0);
115 module_param_array(media, charp, NULL, 0);
116 module_param(flowctrl, int, 0);
117 MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
118 MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
119 MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
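A note on the debug parameter above: sundance_probe1() later converts the level into a netif message bitmap with np->msg_enable = (1 << debug) - 1, and the netif_msg_*() checks in this file test bits of that mask. A tiny stand-alone sketch of that mapping (nothing here is driver API beyond that one expression):

	#include <stdio.h>

	/* Mirrors the expression used in sundance_probe1(): level N enables the
	 * N lowest NETIF_MSG_* bits; level 0 enables no messages at all. */
	static unsigned int level_to_msg_enable(unsigned int debug)
	{
		return (1u << debug) - 1;
	}

	int main(void)
	{
		printf("debug=1 -> 0x%04x\n", level_to_msg_enable(1));	/* 0x0001 */
		printf("debug=7 -> 0x%04x\n", level_to_msg_enable(7));	/* 0x007f */
		return 0;
	}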
120 
121 /*
122  Theory of Operation
123 
124 I. Board Compatibility
125 
126 This driver is designed for the Sundance Technologies "Alta" ST201 chip.
127 
128 II. Board-specific settings
129 
130 III. Driver operation
131 
132 IIIa. Ring buffers
133 
134 This driver uses two statically allocated fixed-size descriptor lists
135 formed into rings by a branch from the final descriptor to the beginning of
136 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
137 Some chips explicitly use only 2^N sized rings, while others use a
138 'next descriptor' pointer that the driver forms into rings.
139 
140 IIIb/c. Transmit/Receive Structure
141 
142 This driver uses a zero-copy receive and transmit scheme.
143 The driver allocates full frame size skbuffs for the Rx ring buffers at
144 open() time and passes the skb->data field to the chip as receive data
145 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
146 a fresh skbuff is allocated and the frame is copied to the new skbuff.
147 When the incoming frame is larger, the skbuff is passed directly up the
148 protocol stack. Buffers consumed this way are replaced by newly allocated
149 skbuffs in a later phase of receives.
150 
151 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
152 using a full-sized skbuff for small frames vs. the copying costs of larger
153 frames. New boards are typically used in generously configured machines
154 and the underfilled buffers have negligible impact compared to the benefit of
155 a single allocation size, so the default value of zero results in never
156 copying packets. When copying is done, the cost is usually mitigated by using
157 a combined copy/checksum routine. Copying also preloads the cache, which is
158 most useful with small frames.
159 
160 A subtle aspect of the operation is that the IP header at offset 14 in an
161 ethernet frame isn't longword aligned for further processing.
162 Unaligned buffers are permitted by the Sundance hardware, so
163 frames are received into the skbuff at an offset of "+2", 16-byte aligning
164 the IP header.
165 
166 IIId. Synchronization
167 
168 The driver runs as two independent, single-threaded flows of control. One
169 is the send-packet routine, which enforces single-threaded use by the
170 dev->tbusy flag. The other thread is the interrupt handler, which is single
171 threaded by the hardware and interrupt handling software.
172 
173 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
174 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
175 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
176 the 'lp->tx_full' flag.
177 
178 The interrupt handler has exclusive control over the Rx ring and records stats
179 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
180 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
181 clears both the tx_full and tbusy flags.
182 
183 IV. Notes
184 
185 IVb. References
186 
187 The Sundance ST201 datasheet, preliminary version.
188 The Kendin KS8723 datasheet, preliminary version.
189 The ICplus IP100 datasheet, preliminary version.
190 http://www.scyld.com/expert/100mbps.html
191 http://www.scyld.com/expert/NWay.html
192 
193 IVc. Errata
194 
195 */
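To make the copy-break description in section IIIb/c concrete, here is a condensed stand-alone sketch of the receive-side decision. It is illustrative only, with hypothetical names and none of the DMA mapping or skb bookkeeping; the authoritative logic, including the dma_sync/dma_unmap calls, is in rx_poll() further down in this file.

	#include <stdbool.h>
	#include <stdlib.h>
	#include <string.h>

	/* Condensed model of one Rx ring entry: a full-size buffer owned by the ring. */
	struct rx_slot {
		unsigned char *buf;
		size_t buf_len;
	};

	/* Frames shorter than the break point are copied into a freshly allocated,
	 * minimally sized buffer so the large ring buffer can be reused in place;
	 * longer frames are handed up directly and the caller must refill the slot.
	 * Returns the buffer to pass up the stack and reports whether the slot was
	 * consumed. */
	static unsigned char *deliver_frame(struct rx_slot *slot, size_t pkt_len,
					    size_t rx_copybreak, bool *consumed)
	{
		if (pkt_len < rx_copybreak) {
			unsigned char *copy = malloc(pkt_len + 2);	/* +2 keeps the IP header aligned */
			if (copy) {
				memcpy(copy + 2, slot->buf + 2, pkt_len);
				*consumed = false;		/* ring buffer stays in place */
				return copy;
			}
		}
		*consumed = true;				/* ring slot needs a fresh buffer */
		return slot->buf;
	}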
196 
197 /* Work-around for Kendin chip bugs. */
198 #ifndef CONFIG_SUNDANCE_MMIO
199 #define USE_IO_OPS 1
200 #endif
201 
202 static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
203  { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204  { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205  { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206  { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
207  { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
208  { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
209  { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
210  { }
211 };
212 MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
213 
214 enum {
215  netdev_io_size = 128
216 };
217 
218 struct pci_id_info {
219  const char *name;
220 };
221 static const struct pci_id_info pci_id_tbl[] __devinitconst = {
222  {"D-Link DFE-550TX FAST Ethernet Adapter"},
223  {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224  {"D-Link DFE-580TX 4 port Server Adapter"},
225  {"D-Link DFE-530TXS FAST Ethernet Adapter"},
226  {"D-Link DL10050-based FAST Ethernet Adapter"},
227  {"Sundance Technology Alta"},
228  {"IC Plus Corporation IP100A FAST Ethernet Adapter"},
229  { } /* terminate list. */
230 };
231 
232 /* This driver was written to use PCI memory space, however x86-oriented
233  hardware often uses I/O space accesses. */
234 
235 /* Offsets to the device registers.
236  Unlike software-only systems, device drivers interact with complex hardware.
237  It's not useful to define symbolic names for every register bit in the
238  device. The name can only partially document the semantics and make
239  the driver longer and more difficult to read.
240  In general, only the important configuration values or bits changed
241  multiple times should be defined symbolically.
242 */
243 enum alta_offsets {
244  DMACtrl = 0x00,
245  TxListPtr = 0x04,
246  TxDMABurstThresh = 0x08,
247  TxDMAUrgentThresh = 0x09,
248  TxDMAPollPeriod = 0x0a,
249  RxDMAStatus = 0x0c,
250  RxListPtr = 0x10,
251  DebugCtrl0 = 0x1a,
252  DebugCtrl1 = 0x1c,
253  RxDMABurstThresh = 0x14,
254  RxDMAUrgentThresh = 0x15,
255  RxDMAPollPeriod = 0x16,
256  LEDCtrl = 0x1a,
257  ASICCtrl = 0x30,
258  EEData = 0x34,
259  EECtrl = 0x36,
260  FlashAddr = 0x40,
261  FlashData = 0x44,
262  TxStatus = 0x46,
263  TxFrameId = 0x47,
264  DownCounter = 0x18,
265  IntrClear = 0x4a,
266  IntrEnable = 0x4c,
267  IntrStatus = 0x4e,
268  MACCtrl0 = 0x50,
269  MACCtrl1 = 0x52,
270  StationAddr = 0x54,
271  MaxFrameSize = 0x5A,
272  RxMode = 0x5c,
273  MIICtrl = 0x5e,
274  MulticastFilter0 = 0x60,
275  MulticastFilter1 = 0x64,
276  RxOctetsLow = 0x68,
277  RxOctetsHigh = 0x6a,
278  TxOctetsLow = 0x6c,
279  TxOctetsHigh = 0x6e,
280  TxFramesOK = 0x70,
281  RxFramesOK = 0x72,
282  StatsCarrierError = 0x74,
283  StatsLateColl = 0x75,
284  StatsMultiColl = 0x76,
285  StatsOneColl = 0x77,
286  StatsTxDefer = 0x78,
287  RxMissed = 0x79,
288  StatsTxXSDefer = 0x7a,
289  StatsTxAbort = 0x7b,
290  StatsBcastTx = 0x7c,
291  StatsBcastRx = 0x7d,
292  StatsMcastTx = 0x7e,
293  StatsMcastRx = 0x7f,
294  /* Aliased and bogus values! */
295  RxStatus = 0x0c,
296 };
297 
298 #define ASIC_HI_WORD(x) ((x) + 2)
299 
300 enum ASICCtrl_HiWord_bit {
301  GlobalReset = 0x0001,
302  RxReset = 0x0002,
303  TxReset = 0x0004,
304  DMAReset = 0x0008,
305  FIFOReset = 0x0010,
306  NetworkReset = 0x0020,
307  HostReset = 0x0040,
308  ResetBusy = 0x0400,
309 };
310 
311 /* Bits in the interrupt status/mask registers. */
312 enum intr_status_bits {
313  IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
314  IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
315  IntrDrvRqst=0x0040,
316  StatsMax=0x0080, LinkChange=0x0100,
317  IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
318 };
319 
320 /* Bits in the RxMode register. */
321 enum rx_mode_bits {
322  AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
323  AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
324 };
325 /* Bits in MACCtrl. */
326 enum mac_ctrl0_bits {
327  EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
328  EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
329 };
330 enum mac_ctrl1_bits {
331  StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
332  TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
333  RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
334 };
335 
336 /* The Rx and Tx buffer descriptors. */
337 /* Note that using only 32 bit fields simplifies conversion to big-endian
338  architectures. */
339 struct netdev_desc {
340  __le32 next_desc;
341  __le32 status;
342  struct desc_frag { __le32 addr, length; } frag[1];
343 };
344 
345 /* Bits in netdev_desc.status */
346 enum desc_status_bits {
347  DescOwn=0x8000,
348  DescEndPacket=0x4000,
349  DescEndRing=0x2000,
350  LastFrag=0x80000000,
351  DescIntrOnTx=0x8000,
352  DescIntrOnDMADone=0x80000000,
353  DisableAlign = 0x00000001,
354 };
355 
356 #define PRIV_ALIGN 15 /* Required alignment mask */
357 /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
358  within the structure. */
359 #define MII_CNT 4
360 struct netdev_private {
361  /* Descriptor rings first for alignment. */
362  struct netdev_desc *rx_ring;
363  struct netdev_desc *tx_ring;
364  struct sk_buff* rx_skbuff[RX_RING_SIZE];
365  struct sk_buff* tx_skbuff[TX_RING_SIZE];
366  dma_addr_t tx_ring_dma;
367  dma_addr_t rx_ring_dma;
368  struct timer_list timer; /* Media monitoring timer. */
369  /* ethtool extra stats */
370  struct {
371  u64 tx_multiple_collisions;
372  u64 tx_single_collisions;
373  u64 tx_late_collisions;
374  u64 tx_deferred;
375  u64 tx_deferred_excessive;
376  u64 tx_aborted;
377  u64 tx_bcasts;
378  u64 rx_bcasts;
379  u64 tx_mcasts;
380  u64 rx_mcasts;
381  } xstats;
382  /* Frequently used values: keep some adjacent for cache effect. */
383  spinlock_t lock;
384  int msg_enable;
385  int chip_id;
386  unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
387  unsigned int rx_buf_sz; /* Based on MTU+slack. */
388  struct netdev_desc *last_tx; /* Last Tx descriptor used. */
389  unsigned int cur_tx, dirty_tx;
390  /* These values keep track of the transceiver/media in use. */
391  unsigned int flowctrl:1;
392  unsigned int default_port:4; /* Last dev->if_port value. */
393  unsigned int an_enable:1;
394  unsigned int speed;
395  struct tasklet_struct rx_tasklet;
396  struct tasklet_struct tx_tasklet;
397  int budget;
398  int cur_task;
399  /* Multicast and receive mode. */
400  spinlock_t mcastlock; /* SMP lock multicast updates. */
401  u16 mcast_filter[4];
402  /* MII transceiver section. */
403  struct mii_if_info mii_if;
404  int mii_preamble_required;
405  unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
406  struct pci_dev *pci_dev;
407  void __iomem *base;
408  spinlock_t statlock;
409 };
410 
411 /* The station address location in the EEPROM. */
412 #define EEPROM_SA_OFFSET 0x10
413 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
414  IntrDrvRqst | IntrTxDone | StatsMax | \
415  LinkChange)
416 
417 static int change_mtu(struct net_device *dev, int new_mtu);
418 static int eeprom_read(void __iomem *ioaddr, int location);
419 static int mdio_read(struct net_device *dev, int phy_id, int location);
420 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
421 static int mdio_wait_link(struct net_device *dev, int wait);
422 static int netdev_open(struct net_device *dev);
423 static void check_duplex(struct net_device *dev);
424 static void netdev_timer(unsigned long data);
425 static void tx_timeout(struct net_device *dev);
426 static void init_ring(struct net_device *dev);
427 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
428 static int reset_tx (struct net_device *dev);
429 static irqreturn_t intr_handler(int irq, void *dev_instance);
430 static void rx_poll(unsigned long data);
431 static void tx_poll(unsigned long data);
432 static void refill_rx (struct net_device *dev);
433 static void netdev_error(struct net_device *dev, int intr_status);
434 static void netdev_error(struct net_device *dev, int intr_status);
435 static void set_rx_mode(struct net_device *dev);
436 static int __set_mac_addr(struct net_device *dev);
437 static int sundance_set_mac_addr(struct net_device *dev, void *data);
438 static struct net_device_stats *get_stats(struct net_device *dev);
439 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
440 static int netdev_close(struct net_device *dev);
441 static const struct ethtool_ops ethtool_ops;
442 
443 static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
444 {
445  struct netdev_private *np = netdev_priv(dev);
446  void __iomem *ioaddr = np->base + ASICCtrl;
447  int countdown;
448 
449  /* ST201 documentation states ASICCtrl is a 32bit register */
450  iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
451  /* ST201 documentation states reset can take up to 1 ms */
452  countdown = 10 + 1;
453  while (ioread32 (ioaddr) & (ResetBusy << 16)) {
454  if (--countdown == 0) {
455  printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
456  break;
457  }
458  udelay(100);
459  }
460 }
461 
462 static const struct net_device_ops netdev_ops = {
463  .ndo_open = netdev_open,
464  .ndo_stop = netdev_close,
465  .ndo_start_xmit = start_tx,
466  .ndo_get_stats = get_stats,
467  .ndo_set_rx_mode = set_rx_mode,
468  .ndo_do_ioctl = netdev_ioctl,
469  .ndo_tx_timeout = tx_timeout,
470  .ndo_change_mtu = change_mtu,
471  .ndo_set_mac_address = sundance_set_mac_addr,
472  .ndo_validate_addr = eth_validate_addr,
473 };
474 
475 static int __devinit sundance_probe1 (struct pci_dev *pdev,
476  const struct pci_device_id *ent)
477 {
478  struct net_device *dev;
479  struct netdev_private *np;
480  static int card_idx;
481  int chip_idx = ent->driver_data;
482  int irq;
483  int i;
484  void __iomem *ioaddr;
485  u16 mii_ctl;
486  void *ring_space;
487  dma_addr_t ring_dma;
488 #ifdef USE_IO_OPS
489  int bar = 0;
490 #else
491  int bar = 1;
492 #endif
493  int phy, phy_end, phy_idx = 0;
494 
495 /* when built into the kernel, we only print version if device is found */
496 #ifndef MODULE
497  static int printed_version;
498  if (!printed_version++)
499  printk(version);
500 #endif
501 
502  if (pci_enable_device(pdev))
503  return -EIO;
504  pci_set_master(pdev);
505 
506  irq = pdev->irq;
507 
508  dev = alloc_etherdev(sizeof(*np));
509  if (!dev)
510  return -ENOMEM;
511  SET_NETDEV_DEV(dev, &pdev->dev);
512 
513  if (pci_request_regions(pdev, DRV_NAME))
514  goto err_out_netdev;
515 
516  ioaddr = pci_iomap(pdev, bar, netdev_io_size);
517  if (!ioaddr)
518  goto err_out_res;
519 
520  for (i = 0; i < 3; i++)
521  ((__le16 *)dev->dev_addr)[i] =
522  cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
523  memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
524 
525  np = netdev_priv(dev);
526  np->base = ioaddr;
527  np->pci_dev = pdev;
528  np->chip_id = chip_idx;
529  np->msg_enable = (1 << debug) - 1;
530  spin_lock_init(&np->lock);
531  spin_lock_init(&np->statlock);
532  tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
533  tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
534 
535  ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
536  &ring_dma, GFP_KERNEL);
537  if (!ring_space)
538  goto err_out_cleardev;
539  np->tx_ring = (struct netdev_desc *)ring_space;
540  np->tx_ring_dma = ring_dma;
541 
542  ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
543  &ring_dma, GFP_KERNEL);
544  if (!ring_space)
545  goto err_out_unmap_tx;
546  np->rx_ring = (struct netdev_desc *)ring_space;
547  np->rx_ring_dma = ring_dma;
548 
549  np->mii_if.dev = dev;
550  np->mii_if.mdio_read = mdio_read;
551  np->mii_if.mdio_write = mdio_write;
552  np->mii_if.phy_id_mask = 0x1f;
553  np->mii_if.reg_num_mask = 0x1f;
554 
555  /* The chip-specific entries in the device structure. */
556  dev->netdev_ops = &netdev_ops;
557  SET_ETHTOOL_OPS(dev, &ethtool_ops);
558  dev->watchdog_timeo = TX_TIMEOUT;
559 
560  pci_set_drvdata(pdev, dev);
561 
562  i = register_netdev(dev);
563  if (i)
564  goto err_out_unmap_rx;
565 
566  printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
567  dev->name, pci_id_tbl[chip_idx].name, ioaddr,
568  dev->dev_addr, irq);
569 
570  np->phys[0] = 1; /* Default setting */
571  np->mii_preamble_required++;
572 
573  /*
574  * It seems some PHYs don't deal well with address 0 being accessed
575  * first
576  */
577  if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
578  phy = 0;
579  phy_end = 31;
580  } else {
581  phy = 1;
582  phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */
583  }
584  for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
585  int phyx = phy & 0x1f;
586  int mii_status = mdio_read(dev, phyx, MII_BMSR);
587  if (mii_status != 0xffff && mii_status != 0x0000) {
588  np->phys[phy_idx++] = phyx;
589  np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
590  if ((mii_status & 0x0040) == 0)
591  np->mii_preamble_required++;
592  printk(KERN_INFO "%s: MII PHY found at address %d, status "
593  "0x%4.4x advertising %4.4x.\n",
594  dev->name, phyx, mii_status, np->mii_if.advertising);
595  }
596  }
597  np->mii_preamble_required--;
598 
599  if (phy_idx == 0) {
600  printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
601  dev->name, ioread32(ioaddr + ASICCtrl));
602  goto err_out_unregister;
603  }
604 
605  np->mii_if.phy_id = np->phys[0];
606 
607  /* Parse override configuration */
608  np->an_enable = 1;
609  if (card_idx < MAX_UNITS) {
610  if (media[card_idx] != NULL) {
611  np->an_enable = 0;
612  if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
613  strcmp (media[card_idx], "4") == 0) {
614  np->speed = 100;
615  np->mii_if.full_duplex = 1;
616  } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
617  strcmp (media[card_idx], "3") == 0) {
618  np->speed = 100;
619  np->mii_if.full_duplex = 0;
620  } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
621  strcmp (media[card_idx], "2") == 0) {
622  np->speed = 10;
623  np->mii_if.full_duplex = 1;
624  } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
625  strcmp (media[card_idx], "1") == 0) {
626  np->speed = 10;
627  np->mii_if.full_duplex = 0;
628  } else {
629  np->an_enable = 1;
630  }
631  }
632  if (flowctrl == 1)
633  np->flowctrl = 1;
634  }
635 
636  /* Fibre PHY? */
637  if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
638  /* Default 100Mbps Full */
639  if (np->an_enable) {
640  np->speed = 100;
641  np->mii_if.full_duplex = 1;
642  np->an_enable = 0;
643  }
644  }
645  /* Reset PHY */
646  mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
647  mdelay (300);
648  /* If flow control enabled, we need to advertise it.*/
649  if (np->flowctrl)
650  mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
651  mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
652  /* Force media type */
653  if (!np->an_enable) {
654  mii_ctl = 0;
655  mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
656  mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
657  mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
658  printk (KERN_INFO "Override speed=%d, %s duplex\n",
659  np->speed, np->mii_if.full_duplex ? "Full" : "Half");
660 
661  }
662 
663  /* Perhaps move the reset here? */
664  /* Reset the chip to erase previous misconfiguration. */
665  if (netif_msg_hw(np))
666  printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
667  sundance_reset(dev, 0x00ff << 16);
668  if (netif_msg_hw(np))
669  printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
670 
671  card_idx++;
672  return 0;
673 
674 err_out_unregister:
675  unregister_netdev(dev);
676 err_out_unmap_rx:
677  dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
678  np->rx_ring, np->rx_ring_dma);
679 err_out_unmap_tx:
680  dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
681  np->tx_ring, np->tx_ring_dma);
682 err_out_cleardev:
683  pci_set_drvdata(pdev, NULL);
684  pci_iounmap(pdev, ioaddr);
685 err_out_res:
686  pci_release_regions(pdev);
687 err_out_netdev:
688  free_netdev (dev);
689  return -ENODEV;
690 }
691 
692 static int change_mtu(struct net_device *dev, int new_mtu)
693 {
694  if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
695  return -EINVAL;
696  if (netif_running(dev))
697  return -EBUSY;
698  dev->mtu = new_mtu;
699  return 0;
700 }
701 
702 #define eeprom_delay(ee_addr) ioread32(ee_addr)
703 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
704 static int __devinit eeprom_read(void __iomem *ioaddr, int location)
705 {
706  int boguscnt = 10000; /* Typical 1900 ticks. */
707  iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
708  do {
709  eeprom_delay(ioaddr + EECtrl);
710  if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
711  return ioread16(ioaddr + EEData);
712  }
713  } while (--boguscnt > 0);
714  return 0;
715 }
716 
717 /* MII transceiver control section.
718  Read and write the MII registers using software-generated serial
719  MDIO protocol. See the MII specifications or DP83840A data sheet
720  for details.
721 
722  The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
723  met by back-to-back 33Mhz PCI cycles. */
724 #define mdio_delay() ioread8(mdio_addr)
725 
726 enum mii_reg_bits {
727  MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
728 };
729 #define MDIO_EnbIn (0)
730 #define MDIO_WRITE0 (MDIO_EnbOutput)
731 #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
732 
733 /* Generate the preamble required for initial synchronization and
734  a few older transceivers. */
735 static void mdio_sync(void __iomem *mdio_addr)
736 {
737  int bits = 32;
738 
739  /* Establish sync by sending at least 32 logic ones. */
740  while (--bits >= 0) {
741  iowrite8(MDIO_WRITE1, mdio_addr);
742  mdio_delay();
743  iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
744  mdio_delay();
745  }
746 }
747 
748 static int mdio_read(struct net_device *dev, int phy_id, int location)
749 {
750  struct netdev_private *np = netdev_priv(dev);
751  void __iomem *mdio_addr = np->base + MIICtrl;
752  int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
753  int i, retval = 0;
754 
755  if (np->mii_preamble_required)
756  mdio_sync(mdio_addr);
757 
758  /* Shift the read command bits out. */
759  for (i = 15; i >= 0; i--) {
760  int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
761 
762  iowrite8(dataval, mdio_addr);
763  mdio_delay();
764  iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
765  mdio_delay();
766  }
767  /* Read the two transition, 16 data, and wire-idle bits. */
768  for (i = 19; i > 0; i--) {
769  iowrite8(MDIO_EnbIn, mdio_addr);
770  mdio_delay();
771  retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
772  iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
773  mdio_delay();
774  }
775  return (retval>>1) & 0xffff;
776 }
777 
778 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
779 {
780  struct netdev_private *np = netdev_priv(dev);
781  void __iomem *mdio_addr = np->base + MIICtrl;
782  int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
783  int i;
784 
785  if (np->mii_preamble_required)
786  mdio_sync(mdio_addr);
787 
788  /* Shift the command bits out. */
789  for (i = 31; i >= 0; i--) {
790  int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
791 
792  iowrite8(dataval, mdio_addr);
793  mdio_delay();
794  iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
795  mdio_delay();
796  }
797  /* Clear out extra bits. */
798  for (i = 2; i > 0; i--) {
799  iowrite8(MDIO_EnbIn, mdio_addr);
800  mdio_delay();
801  iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
802  mdio_delay();
803  }
804 }
805 
806 static int mdio_wait_link(struct net_device *dev, int wait)
807 {
808  int bmsr;
809  int phy_id;
810  struct netdev_private *np;
811 
812  np = netdev_priv(dev);
813  phy_id = np->phys[0];
814 
815  do {
816  bmsr = mdio_read(dev, phy_id, MII_BMSR);
817  if (bmsr & 0x0004)
818  return 0;
819  mdelay(1);
820  } while (--wait > 0);
821  return -1;
822 }
823 
824 static int netdev_open(struct net_device *dev)
825 {
826  struct netdev_private *np = netdev_priv(dev);
827  void __iomem *ioaddr = np->base;
828  const int irq = np->pci_dev->irq;
829  unsigned long flags;
830  int i;
831 
832  /* Do we need to reset the chip??? */
833 
834  i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
835  if (i)
836  return i;
837 
838  if (netif_msg_ifup(np))
839  printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
840 
841  init_ring(dev);
842 
843  iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
844  /* The Tx list pointer is written as packets are queued. */
845 
846  /* Initialize other registers. */
847  __set_mac_addr(dev);
848 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
849  iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
850 #else
851  iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
852 #endif
853  if (dev->mtu > 2047)
854  iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
855 
856  /* Configure the PCI bus bursts and FIFO thresholds. */
857 
858  if (dev->if_port == 0)
859  dev->if_port = np->default_port;
860 
861  spin_lock_init(&np->mcastlock);
862 
863  set_rx_mode(dev);
864  iowrite16(0, ioaddr + IntrEnable);
865  iowrite16(0, ioaddr + DownCounter);
866  /* Set the chip to poll every N*320nsec. */
867  iowrite8(100, ioaddr + RxDMAPollPeriod);
868  iowrite8(127, ioaddr + TxDMAPollPeriod);
869  /* Fix DFE-580TX packet drop issue */
870  if (np->pci_dev->revision >= 0x14)
871  iowrite8(0x01, ioaddr + DebugCtrl1);
872  netif_start_queue(dev);
873 
874  spin_lock_irqsave(&np->lock, flags);
875  reset_tx(dev);
876  spin_unlock_irqrestore(&np->lock, flags);
877 
878  iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
879 
880  if (netif_msg_ifup(np))
881  printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
882  "MAC Control %x, %4.4x %4.4x.\n",
883  dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
884  ioread32(ioaddr + MACCtrl0),
885  ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
886 
887  /* Set the timer to check for link beat. */
888  init_timer(&np->timer);
889  np->timer.expires = jiffies + 3*HZ;
890  np->timer.data = (unsigned long)dev;
891  np->timer.function = netdev_timer; /* timer handler */
892  add_timer(&np->timer);
893 
894  /* Enable interrupts by setting the interrupt mask. */
895  iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
896 
897  return 0;
898 }
899 
900 static void check_duplex(struct net_device *dev)
901 {
902  struct netdev_private *np = netdev_priv(dev);
903  void __iomem *ioaddr = np->base;
904  int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
905  int negotiated = mii_lpa & np->mii_if.advertising;
906  int duplex;
907 
908  /* Force media */
909  if (!np->an_enable || mii_lpa == 0xffff) {
910  if (np->mii_if.full_duplex)
911  iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
912  ioaddr + MACCtrl0);
913  return;
914  }
915 
916  /* Autonegotiation */
917  duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
918  if (np->mii_if.full_duplex != duplex) {
919  np->mii_if.full_duplex = duplex;
920  if (netif_msg_link(np))
921  printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
922  "negotiated capability %4.4x.\n", dev->name,
923  duplex ? "full" : "half", np->phys[0], negotiated);
924  iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
925  }
926 }
927 
928 static void netdev_timer(unsigned long data)
929 {
930  struct net_device *dev = (struct net_device *)data;
931  struct netdev_private *np = netdev_priv(dev);
932  void __iomem *ioaddr = np->base;
933  int next_tick = 10*HZ;
934 
935  if (netif_msg_timer(np)) {
936  printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
937  "Tx %x Rx %x.\n",
938  dev->name, ioread16(ioaddr + IntrEnable),
939  ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
940  }
941  check_duplex(dev);
942  np->timer.expires = jiffies + next_tick;
943  add_timer(&np->timer);
944 }
945 
946 static void tx_timeout(struct net_device *dev)
947 {
948  struct netdev_private *np = netdev_priv(dev);
949  void __iomem *ioaddr = np->base;
950  unsigned long flag;
951 
952  netif_stop_queue(dev);
953  tasklet_disable(&np->tx_tasklet);
954  iowrite16(0, ioaddr + IntrEnable);
955  printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
956  "TxFrameId %2.2x,"
957  " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
958  ioread8(ioaddr + TxFrameId));
959 
960  {
961  int i;
962  for (i=0; i<TX_RING_SIZE; i++) {
963  printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
964  (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
965  le32_to_cpu(np->tx_ring[i].next_desc),
966  le32_to_cpu(np->tx_ring[i].status),
967  (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
968  le32_to_cpu(np->tx_ring[i].frag[0].addr),
969  le32_to_cpu(np->tx_ring[i].frag[0].length));
970  }
971  printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
972  ioread32(np->base + TxListPtr),
973  netif_queue_stopped(dev));
974  printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
975  np->cur_tx, np->cur_tx % TX_RING_SIZE,
976  np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
977  printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
978  printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
979  }
980  spin_lock_irqsave(&np->lock, flag);
981 
982  /* Stop and restart the chip's Tx processes . */
983  reset_tx(dev);
984  spin_unlock_irqrestore(&np->lock, flag);
985 
986  dev->if_port = 0;
987 
988  dev->trans_start = jiffies; /* prevent tx timeout */
989  dev->stats.tx_errors++;
990  if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
991  netif_wake_queue(dev);
992  }
993  iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
994  tasklet_enable(&np->tx_tasklet);
995 }
996 
997 
998 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
999 static void init_ring(struct net_device *dev)
1000 {
1001  struct netdev_private *np = netdev_priv(dev);
1002  int i;
1003 
1004  np->cur_rx = np->cur_tx = 0;
1005  np->dirty_rx = np->dirty_tx = 0;
1006  np->cur_task = 0;
1007 
1008  np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
1009 
1010  /* Initialize all Rx descriptors. */
1011  for (i = 0; i < RX_RING_SIZE; i++) {
1012  np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1013  ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1014  np->rx_ring[i].status = 0;
1015  np->rx_ring[i].frag[0].length = 0;
1016  np->rx_skbuff[i] = NULL;
1017  }
1018 
1019  /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1020  for (i = 0; i < RX_RING_SIZE; i++) {
1021  struct sk_buff *skb =
1022  netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1023  np->rx_skbuff[i] = skb;
1024  if (skb == NULL)
1025  break;
1026  skb_reserve(skb, 2); /* 16 byte align the IP header. */
1027  np->rx_ring[i].frag[0].addr = cpu_to_le32(
1028  dma_map_single(&np->pci_dev->dev, skb->data,
1029  np->rx_buf_sz, DMA_FROM_DEVICE));
1030  if (dma_mapping_error(&np->pci_dev->dev,
1031  np->rx_ring[i].frag[0].addr)) {
1032  dev_kfree_skb(skb);
1033  np->rx_skbuff[i] = NULL;
1034  break;
1035  }
1036  np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1037  }
1038  np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1039 
1040  for (i = 0; i < TX_RING_SIZE; i++) {
1041  np->tx_skbuff[i] = NULL;
1042  np->tx_ring[i].status = 0;
1043  }
1044 }
1045 
1046 static void tx_poll (unsigned long data)
1047 {
1048  struct net_device *dev = (struct net_device *)data;
1049  struct netdev_private *np = netdev_priv(dev);
1050  unsigned head = np->cur_task % TX_RING_SIZE;
1051  struct netdev_desc *txdesc =
1052  &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1053 
1054  /* Chain the next pointer */
1055  for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1056  int entry = np->cur_task % TX_RING_SIZE;
1057  txdesc = &np->tx_ring[entry];
1058  if (np->last_tx) {
1059  np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1060  entry*sizeof(struct netdev_desc));
1061  }
1062  np->last_tx = txdesc;
1063  }
1064  /* Indicate the latest descriptor of tx ring */
1065  txdesc->status |= cpu_to_le32(DescIntrOnTx);
1066 
1067  if (ioread32 (np->base + TxListPtr) == 0)
1068  iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1069  np->base + TxListPtr);
1070 }
1071 
1072 static netdev_tx_t
1073 start_tx (struct sk_buff *skb, struct net_device *dev)
1074 {
1075  struct netdev_private *np = netdev_priv(dev);
1076  struct netdev_desc *txdesc;
1077  unsigned entry;
1078 
1079  /* Calculate the next Tx descriptor entry. */
1080  entry = np->cur_tx % TX_RING_SIZE;
1081  np->tx_skbuff[entry] = skb;
1082  txdesc = &np->tx_ring[entry];
1083 
1084  txdesc->next_desc = 0;
1085  txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1086  txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
1087  skb->data, skb->len, DMA_TO_DEVICE));
1088  if (dma_mapping_error(&np->pci_dev->dev,
1089  txdesc->frag[0].addr))
1090  goto drop_frame;
1091  txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1092 
1093  /* Increment cur_tx before tasklet_schedule() */
1094  np->cur_tx++;
1095  mb();
1096  /* Schedule a tx_poll() task */
1097  tasklet_schedule(&np->tx_tasklet);
1098 
1099  /* On some architectures: explicitly flush cache lines here. */
1100  if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 &&
1101  !netif_queue_stopped(dev)) {
1102  /* do nothing */
1103  } else {
1104  netif_stop_queue (dev);
1105  }
1106  if (netif_msg_tx_queued(np)) {
1107  printk (KERN_DEBUG
1108  "%s: Transmit frame #%d queued in slot %d.\n",
1109  dev->name, np->cur_tx, entry);
1110  }
1111  return NETDEV_TX_OK;
1112 
1113 drop_frame:
1114  dev_kfree_skb(skb);
1115  np->tx_skbuff[entry] = NULL;
1116  dev->stats.tx_dropped++;
1117  return NETDEV_TX_OK;
1118 }
1119 
1120 /* Reset hardware tx and free all of tx buffers */
1121 static int
1122 reset_tx (struct net_device *dev)
1123 {
1124  struct netdev_private *np = netdev_priv(dev);
1125  void __iomem *ioaddr = np->base;
1126  struct sk_buff *skb;
1127  int i;
1128 
1129  /* Reset tx logic, TxListPtr will be cleaned */
1130  iowrite16 (TxDisable, ioaddr + MACCtrl1);
1131  sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1132 
1133  /* free all tx skbuff */
1134  for (i = 0; i < TX_RING_SIZE; i++) {
1135  np->tx_ring[i].next_desc = 0;
1136 
1137  skb = np->tx_skbuff[i];
1138  if (skb) {
1139  dma_unmap_single(&np->pci_dev->dev,
1140  le32_to_cpu(np->tx_ring[i].frag[0].addr),
1141  skb->len, DMA_TO_DEVICE);
1142  dev_kfree_skb_any(skb);
1143  np->tx_skbuff[i] = NULL;
1144  dev->stats.tx_dropped++;
1145  }
1146  }
1147  np->cur_tx = np->dirty_tx = 0;
1148  np->cur_task = 0;
1149 
1150  np->last_tx = NULL;
1151  iowrite8(127, ioaddr + TxDMAPollPeriod);
1152 
1153  iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1154  return 0;
1155 }
1156 
1157 /* The interrupt handler cleans up after the Tx thread,
1158  and schedules the Rx poll work. */
1159 static irqreturn_t intr_handler(int irq, void *dev_instance)
1160 {
1161  struct net_device *dev = (struct net_device *)dev_instance;
1162  struct netdev_private *np = netdev_priv(dev);
1163  void __iomem *ioaddr = np->base;
1164  int hw_frame_id;
1165  int tx_cnt;
1166  int tx_status;
1167  int handled = 0;
1168  int i;
1169 
1170 
1171  do {
1172  int intr_status = ioread16(ioaddr + IntrStatus);
1173  iowrite16(intr_status, ioaddr + IntrStatus);
1174 
1175  if (netif_msg_intr(np))
1176  printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1177  dev->name, intr_status);
1178 
1179  if (!(intr_status & DEFAULT_INTR))
1180  break;
1181 
1182  handled = 1;
1183 
1184  if (intr_status & (IntrRxDMADone)) {
1185  iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1186  ioaddr + IntrEnable);
1187  if (np->budget < 0)
1188  np->budget = RX_BUDGET;
1189  tasklet_schedule(&np->rx_tasklet);
1190  }
1191  if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1192  tx_status = ioread16 (ioaddr + TxStatus);
1193  for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1194  if (netif_msg_tx_done(np))
1195  printk
1196  ("%s: Transmit status is %2.2x.\n",
1197  dev->name, tx_status);
1198  if (tx_status & 0x1e) {
1199  if (netif_msg_tx_err(np))
1200  printk("%s: Transmit error status %4.4x.\n",
1201  dev->name, tx_status);
1202  dev->stats.tx_errors++;
1203  if (tx_status & 0x10)
1204  dev->stats.tx_fifo_errors++;
1205  if (tx_status & 0x08)
1206  dev->stats.collisions++;
1207  if (tx_status & 0x04)
1208  dev->stats.tx_fifo_errors++;
1209  if (tx_status & 0x02)
1210  dev->stats.tx_window_errors++;
1211 
1212  /*
1213  ** This reset has been verified on
1214  ** DFE-580TX boards ! [email protected].
1215  */
1216  if (tx_status & 0x10) { /* TxUnderrun */
1217  /* Restart Tx FIFO and transmitter */
1218  sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1219  /* No need to reset the Tx pointer here */
1220  }
1221  /* Restart the Tx. Need to make sure tx enabled */
1222  i = 10;
1223  do {
1224  iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
1225  if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
1226  break;
1227  mdelay(1);
1228  } while (--i);
1229  }
1230  /* Yup, this is a documentation bug. It cost me *hours*. */
1231  iowrite16 (0, ioaddr + TxStatus);
1232  if (tx_cnt < 0) {
1233  iowrite32(5000, ioaddr + DownCounter);
1234  break;
1235  }
1236  tx_status = ioread16 (ioaddr + TxStatus);
1237  }
1238  hw_frame_id = (tx_status >> 8) & 0xff;
1239  } else {
1240  hw_frame_id = ioread8(ioaddr + TxFrameId);
1241  }
1242 
1243  if (np->pci_dev->revision >= 0x14) {
1244  spin_lock(&np->lock);
1245  for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1246  int entry = np->dirty_tx % TX_RING_SIZE;
1247  struct sk_buff *skb;
1248  int sw_frame_id;
1249  sw_frame_id = (le32_to_cpu(
1250  np->tx_ring[entry].status) >> 2) & 0xff;
1251  if (sw_frame_id == hw_frame_id &&
1252  !(le32_to_cpu(np->tx_ring[entry].status)
1253  & 0x00010000))
1254  break;
1255  if (sw_frame_id == (hw_frame_id + 1) %
1256  TX_RING_SIZE)
1257  break;
1258  skb = np->tx_skbuff[entry];
1259  /* Free the original skb. */
1260  dma_unmap_single(&np->pci_dev->dev,
1261  le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1262  skb->len, DMA_TO_DEVICE);
1263  dev_kfree_skb_irq (np->tx_skbuff[entry]);
1264  np->tx_skbuff[entry] = NULL;
1265  np->tx_ring[entry].frag[0].addr = 0;
1266  np->tx_ring[entry].frag[0].length = 0;
1267  }
1268  spin_unlock(&np->lock);
1269  } else {
1270  spin_lock(&np->lock);
1271  for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1272  int entry = np->dirty_tx % TX_RING_SIZE;
1273  struct sk_buff *skb;
1274  if (!(le32_to_cpu(np->tx_ring[entry].status)
1275  & 0x00010000))
1276  break;
1277  skb = np->tx_skbuff[entry];
1278  /* Free the original skb. */
1279  dma_unmap_single(&np->pci_dev->dev,
1280  le32_to_cpu(np->tx_ring[entry].frag[0].addr),
1281  skb->len, DMA_TO_DEVICE);
1282  dev_kfree_skb_irq (np->tx_skbuff[entry]);
1283  np->tx_skbuff[entry] = NULL;
1284  np->tx_ring[entry].frag[0].addr = 0;
1285  np->tx_ring[entry].frag[0].length = 0;
1286  }
1287  spin_unlock(&np->lock);
1288  }
1289 
1290  if (netif_queue_stopped(dev) &&
1291  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1292  /* The ring is no longer full, clear busy flag. */
1293  netif_wake_queue (dev);
1294  }
1295  /* Abnormal error summary/uncommon events handlers. */
1296  if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1297  netdev_error(dev, intr_status);
1298  } while (0);
1299  if (netif_msg_intr(np))
1300  printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1301  dev->name, ioread16(ioaddr + IntrStatus));
1302  return IRQ_RETVAL(handled);
1303 }
1304 
1305 static void rx_poll(unsigned long data)
1306 {
1307  struct net_device *dev = (struct net_device *)data;
1308  struct netdev_private *np = netdev_priv(dev);
1309  int entry = np->cur_rx % RX_RING_SIZE;
1310  int boguscnt = np->budget;
1311  void __iomem *ioaddr = np->base;
1312  int received = 0;
1313 
1314  /* If EOP is set on the next entry, it's a new packet. Send it up. */
1315  while (1) {
1316  struct netdev_desc *desc = &(np->rx_ring[entry]);
1317  u32 frame_status = le32_to_cpu(desc->status);
1318  int pkt_len;
1319 
1320  if (--boguscnt < 0) {
1321  goto not_done;
1322  }
1323  if (!(frame_status & DescOwn))
1324  break;
1325  pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1326  if (netif_msg_rx_status(np))
1327  printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1328  frame_status);
1329  if (frame_status & 0x001f4000) {
1330  /* There was a error. */
1331  if (netif_msg_rx_err(np))
1332  printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1333  frame_status);
1334  dev->stats.rx_errors++;
1335  if (frame_status & 0x00100000)
1336  dev->stats.rx_length_errors++;
1337  if (frame_status & 0x00010000)
1338  dev->stats.rx_fifo_errors++;
1339  if (frame_status & 0x00060000)
1340  dev->stats.rx_frame_errors++;
1341  if (frame_status & 0x00080000)
1342  dev->stats.rx_crc_errors++;
1343  if (frame_status & 0x00100000) {
1344  printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1345  " status %8.8x.\n",
1346  dev->name, frame_status);
1347  }
1348  } else {
1349  struct sk_buff *skb;
1350 #ifndef final_version
1351  if (netif_msg_rx_status(np))
1352  printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1353  ", bogus_cnt %d.\n",
1354  pkt_len, boguscnt);
1355 #endif
1356  /* Check if the packet is long enough to accept without copying
1357  to a minimally-sized skbuff. */
1358  if (pkt_len < rx_copybreak &&
1359  (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1360  skb_reserve(skb, 2); /* 16 byte align the IP header */
1361  dma_sync_single_for_cpu(&np->pci_dev->dev,
1362  le32_to_cpu(desc->frag[0].addr),
1363  np->rx_buf_sz, DMA_FROM_DEVICE);
1364  skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1365  dma_sync_single_for_device(&np->pci_dev->dev,
1366  le32_to_cpu(desc->frag[0].addr),
1367  np->rx_buf_sz, DMA_FROM_DEVICE);
1368  skb_put(skb, pkt_len);
1369  } else {
1370  dma_unmap_single(&np->pci_dev->dev,
1371  le32_to_cpu(desc->frag[0].addr),
1372  np->rx_buf_sz, DMA_FROM_DEVICE);
1373  skb_put(skb = np->rx_skbuff[entry], pkt_len);
1374  np->rx_skbuff[entry] = NULL;
1375  }
1376  skb->protocol = eth_type_trans(skb, dev);
1377  /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1378  netif_rx(skb);
1379  }
1380  entry = (entry + 1) % RX_RING_SIZE;
1381  received++;
1382  }
1383  np->cur_rx = entry;
1384  refill_rx (dev);
1385  np->budget -= received;
1386  iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1387  return;
1388 
1389 not_done:
1390  np->cur_rx = entry;
1391  refill_rx (dev);
1392  if (!received)
1393  received = 1;
1394  np->budget -= received;
1395  if (np->budget <= 0)
1396  np->budget = RX_BUDGET;
1397  tasklet_schedule(&np->rx_tasklet);
1398 }
1399 
1400 static void refill_rx (struct net_device *dev)
1401 {
1402  struct netdev_private *np = netdev_priv(dev);
1403  int entry;
1404  int cnt = 0;
1405 
1406  /* Refill the Rx ring buffers. */
1407  for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1408  np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1409  struct sk_buff *skb;
1410  entry = np->dirty_rx % RX_RING_SIZE;
1411  if (np->rx_skbuff[entry] == NULL) {
1412  skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1413  np->rx_skbuff[entry] = skb;
1414  if (skb == NULL)
1415  break; /* Better luck next round. */
1416  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1417  np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1418  dma_map_single(&np->pci_dev->dev, skb->data,
1419  np->rx_buf_sz, DMA_FROM_DEVICE));
1420  if (dma_mapping_error(&np->pci_dev->dev,
1421  np->rx_ring[entry].frag[0].addr)) {
1422  dev_kfree_skb_irq(skb);
1423  np->rx_skbuff[entry] = NULL;
1424  break;
1425  }
1426  }
1427  /* Perhaps we need not reset this field. */
1428  np->rx_ring[entry].frag[0].length =
1429  cpu_to_le32(np->rx_buf_sz | LastFrag);
1430  np->rx_ring[entry].status = 0;
1431  cnt++;
1432  }
1433 }
1434 static void netdev_error(struct net_device *dev, int intr_status)
1435 {
1436  struct netdev_private *np = netdev_priv(dev);
1437  void __iomem *ioaddr = np->base;
1438  u16 mii_ctl, mii_advertise, mii_lpa;
1439  int speed;
1440 
1441  if (intr_status & LinkChange) {
1442  if (mdio_wait_link(dev, 10) == 0) {
1443  printk(KERN_INFO "%s: Link up\n", dev->name);
1444  if (np->an_enable) {
1445  mii_advertise = mdio_read(dev, np->phys[0],
1446  MII_ADVERTISE);
1447  mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1448  mii_advertise &= mii_lpa;
1449  printk(KERN_INFO "%s: Link changed: ",
1450  dev->name);
1451  if (mii_advertise & ADVERTISE_100FULL) {
1452  np->speed = 100;
1453  printk("100Mbps, full duplex\n");
1454  } else if (mii_advertise & ADVERTISE_100HALF) {
1455  np->speed = 100;
1456  printk("100Mbps, half duplex\n");
1457  } else if (mii_advertise & ADVERTISE_10FULL) {
1458  np->speed = 10;
1459  printk("10Mbps, full duplex\n");
1460  } else if (mii_advertise & ADVERTISE_10HALF) {
1461  np->speed = 10;
1462  printk("10Mbps, half duplex\n");
1463  } else
1464  printk("\n");
1465 
1466  } else {
1467  mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1468  speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1469  np->speed = speed;
1470  printk(KERN_INFO "%s: Link changed: %dMbps ,",
1471  dev->name, speed);
1472  printk("%s duplex.\n",
1473  (mii_ctl & BMCR_FULLDPLX) ?
1474  "full" : "half");
1475  }
1476  check_duplex(dev);
1477  if (np->flowctrl && np->mii_if.full_duplex) {
1478  iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1479  ioaddr + MulticastFilter1+2);
1480  iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1481  ioaddr + MACCtrl0);
1482  }
1483  netif_carrier_on(dev);
1484  } else {
1485  printk(KERN_INFO "%s: Link down\n", dev->name);
1486  netif_carrier_off(dev);
1487  }
1488  }
1489  if (intr_status & StatsMax) {
1490  get_stats(dev);
1491  }
1492  if (intr_status & IntrPCIErr) {
1493  printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1494  dev->name, intr_status);
1495  /* We must do a global reset of DMA to continue. */
1496  }
1497 }
1498 
1499 static struct net_device_stats *get_stats(struct net_device *dev)
1500 {
1501  struct netdev_private *np = netdev_priv(dev);
1502  void __iomem *ioaddr = np->base;
1503  unsigned long flags;
1504  u8 late_coll, single_coll, mult_coll;
1505 
1506  spin_lock_irqsave(&np->statlock, flags);
1507  /* The chip only needs to report frames it silently dropped. */
1508  dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1509  dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1510  dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1511  dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1512 
1513  mult_coll = ioread8(ioaddr + StatsMultiColl);
1514  np->xstats.tx_multiple_collisions += mult_coll;
1515  single_coll = ioread8(ioaddr + StatsOneColl);
1516  np->xstats.tx_single_collisions += single_coll;
1517  late_coll = ioread8(ioaddr + StatsLateColl);
1518  np->xstats.tx_late_collisions += late_coll;
1519  dev->stats.collisions += mult_coll
1520  + single_coll
1521  + late_coll;
1522 
1523  np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer);
1524  np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer);
1525  np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort);
1526  np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx);
1527  np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx);
1528  np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx);
1529  np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx);
1530 
1531  dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1532  dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1533  dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1534  dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1535 
1536  spin_unlock_irqrestore(&np->statlock, flags);
1537 
1538  return &dev->stats;
1539 }
1540 
1541 static void set_rx_mode(struct net_device *dev)
1542 {
1543  struct netdev_private *np = netdev_priv(dev);
1544  void __iomem *ioaddr = np->base;
1545  u16 mc_filter[4]; /* Multicast hash filter */
1546  u32 rx_mode;
1547  int i;
1548 
1549  if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1550  memset(mc_filter, 0xff, sizeof(mc_filter));
1551  rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1552  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1553  (dev->flags & IFF_ALLMULTI)) {
1554  /* Too many to match, or accept all multicasts. */
1555  memset(mc_filter, 0xff, sizeof(mc_filter));
1556  rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1557  } else if (!netdev_mc_empty(dev)) {
1558  struct netdev_hw_addr *ha;
1559  int bit;
1560  int index;
1561  int crc;
1562  memset (mc_filter, 0, sizeof (mc_filter));
1563  netdev_for_each_mc_addr(ha, dev) {
1564  crc = ether_crc_le(ETH_ALEN, ha->addr);
1565  for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1566  if (crc & 0x80000000) index |= 1 << bit;
1567  mc_filter[index/16] |= (1 << (index % 16));
1568  }
1569  rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1570  } else {
1571  iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1572  return;
1573  }
1574  if (np->mii_if.full_duplex && np->flowctrl)
1575  mc_filter[3] |= 0x0200;
1576 
1577  for (i = 0; i < 4; i++)
1578  iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1579  iowrite8(rx_mode, ioaddr + RxMode);
1580 }
1581 
1582 static int __set_mac_addr(struct net_device *dev)
1583 {
1584  struct netdev_private *np = netdev_priv(dev);
1585  u16 addr16;
1586 
1587  addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1588  iowrite16(addr16, np->base + StationAddr);
1589  addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1590  iowrite16(addr16, np->base + StationAddr+2);
1591  addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1592  iowrite16(addr16, np->base + StationAddr+4);
1593  return 0;
1594 }
1595 
1596 /* Invoked with rtnl_lock held */
1597 static int sundance_set_mac_addr(struct net_device *dev, void *data)
1598 {
1599  const struct sockaddr *addr = data;
1600 
1601  if (!is_valid_ether_addr(addr->sa_data))
1602  return -EADDRNOTAVAIL;
1603  memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1604  __set_mac_addr(dev);
1605 
1606  return 0;
1607 }
1608 
1609 static const struct {
1610  const char name[ETH_GSTRING_LEN];
1611 } sundance_stats[] = {
1612  { "tx_multiple_collisions" },
1613  { "tx_single_collisions" },
1614  { "tx_late_collisions" },
1615  { "tx_deferred" },
1616  { "tx_deferred_excessive" },
1617  { "tx_aborted" },
1618  { "tx_bcasts" },
1619  { "rx_bcasts" },
1620  { "tx_mcasts" },
1621  { "rx_mcasts" },
1622 };
1623 
1624 static int check_if_running(struct net_device *dev)
1625 {
1626  if (!netif_running(dev))
1627  return -EINVAL;
1628  return 0;
1629 }
1630 
1631 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1632 {
1633  struct netdev_private *np = netdev_priv(dev);
1634  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1635  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1636  strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1637 }
1638 
1639 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1640 {
1641  struct netdev_private *np = netdev_priv(dev);
1642  spin_lock_irq(&np->lock);
1643  mii_ethtool_gset(&np->mii_if, ecmd);
1644  spin_unlock_irq(&np->lock);
1645  return 0;
1646 }
1647 
1648 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1649 {
1650  struct netdev_private *np = netdev_priv(dev);
1651  int res;
1652  spin_lock_irq(&np->lock);
1653  res = mii_ethtool_sset(&np->mii_if, ecmd);
1654  spin_unlock_irq(&np->lock);
1655  return res;
1656 }
1657 
1658 static int nway_reset(struct net_device *dev)
1659 {
1660  struct netdev_private *np = netdev_priv(dev);
1661  return mii_nway_restart(&np->mii_if);
1662 }
1663 
1664 static u32 get_link(struct net_device *dev)
1665 {
1666  struct netdev_private *np = netdev_priv(dev);
1667  return mii_link_ok(&np->mii_if);
1668 }
1669 
1670 static u32 get_msglevel(struct net_device *dev)
1671 {
1672  struct netdev_private *np = netdev_priv(dev);
1673  return np->msg_enable;
1674 }
1675 
1676 static void set_msglevel(struct net_device *dev, u32 val)
1677 {
1678  struct netdev_private *np = netdev_priv(dev);
1679  np->msg_enable = val;
1680 }
1681 
1682 static void get_strings(struct net_device *dev, u32 stringset,
1683  u8 *data)
1684 {
1685  if (stringset == ETH_SS_STATS)
1686  memcpy(data, sundance_stats, sizeof(sundance_stats));
1687 }
1688 
1689 static int get_sset_count(struct net_device *dev, int sset)
1690 {
1691  switch (sset) {
1692  case ETH_SS_STATS:
1693  return ARRAY_SIZE(sundance_stats);
1694  default:
1695  return -EOPNOTSUPP;
1696  }
1697 }
1698 
1699 static void get_ethtool_stats(struct net_device *dev,
1700  struct ethtool_stats *stats, u64 *data)
1701 {
1702  struct netdev_private *np = netdev_priv(dev);
1703  int i = 0;
1704 
1705  get_stats(dev);
1706  data[i++] = np->xstats.tx_multiple_collisions;
1707  data[i++] = np->xstats.tx_single_collisions;
1708  data[i++] = np->xstats.tx_late_collisions;
1709  data[i++] = np->xstats.tx_deferred;
1710  data[i++] = np->xstats.tx_deferred_excessive;
1711  data[i++] = np->xstats.tx_aborted;
1712  data[i++] = np->xstats.tx_bcasts;
1713  data[i++] = np->xstats.rx_bcasts;
1714  data[i++] = np->xstats.tx_mcasts;
1715  data[i++] = np->xstats.rx_mcasts;
1716 }
1717 
1718 static const struct ethtool_ops ethtool_ops = {
1719  .begin = check_if_running,
1720  .get_drvinfo = get_drvinfo,
1721  .get_settings = get_settings,
1722  .set_settings = set_settings,
1723  .nway_reset = nway_reset,
1724  .get_link = get_link,
1725  .get_msglevel = get_msglevel,
1726  .set_msglevel = set_msglevel,
1727  .get_strings = get_strings,
1728  .get_sset_count = get_sset_count,
1729  .get_ethtool_stats = get_ethtool_stats,
1730 };
1731 
1732 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1733 {
1734  struct netdev_private *np = netdev_priv(dev);
1735  int rc;
1736 
1737  if (!netif_running(dev))
1738  return -EINVAL;
1739 
1740  spin_lock_irq(&np->lock);
1741  rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1742  spin_unlock_irq(&np->lock);
1743 
1744  return rc;
1745 }
1746 
1747 static int netdev_close(struct net_device *dev)
1748 {
1749  struct netdev_private *np = netdev_priv(dev);
1750  void __iomem *ioaddr = np->base;
1751  struct sk_buff *skb;
1752  int i;
1753 
1754  /* Wait and kill tasklet */
1755  tasklet_kill(&np->rx_tasklet);
1756  tasklet_kill(&np->tx_tasklet);
1757  np->cur_tx = 0;
1758  np->dirty_tx = 0;
1759  np->cur_task = 0;
1760  np->last_tx = NULL;
1761 
1762  netif_stop_queue(dev);
1763 
1764  if (netif_msg_ifdown(np)) {
1765  printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1766  "Rx %4.4x Int %2.2x.\n",
1767  dev->name, ioread8(ioaddr + TxStatus),
1768  ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1769  printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1770  dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1771  }
1772 
1773  /* Disable interrupts by clearing the interrupt mask. */
1774  iowrite16(0x0000, ioaddr + IntrEnable);
1775 
1776  /* Disable Rx and Tx DMA so resources can be released safely */
1777  iowrite32(0x500, ioaddr + DMACtrl);
1778 
1779  /* Stop the chip's Tx and Rx processes. */
1780  iowrite16 (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1781 
1782  for (i = 2000; i > 0; i--) {
1783  if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1784  break;
1785  mdelay(1);
1786  }
1787 
1788  iowrite16 (GlobalReset | DMAReset | FIFOReset | NetworkReset,
1789  ioaddr + ASIC_HI_WORD(ASICCtrl));
1790 
1791  for (i = 2000; i > 0; i--) {
1792  if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1793  break;
1794  mdelay(1);
1795  }
1796 
1797 #ifdef __i386__
1798  if (netif_msg_hw(np)) {
1799  printk(KERN_DEBUG " Tx ring at %8.8x:\n",
1800  (int)(np->tx_ring_dma));
1801  for (i = 0; i < TX_RING_SIZE; i++)
1802  printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1803  i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1804  np->tx_ring[i].frag[0].length);
1805  printk(KERN_DEBUG " Rx ring %8.8x:\n",
1806  (int)(np->rx_ring_dma));
1807  for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1808  printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1809  i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1810  np->rx_ring[i].frag[0].length);
1811  }
1812  }
1813 #endif /* __i386__ debugging only */
1814 
1815  free_irq(np->pci_dev->irq, dev);
1816 
1817  del_timer_sync(&np->timer);
1818 
1819  /* Free all the skbuffs in the Rx queue. */
1820  for (i = 0; i < RX_RING_SIZE; i++) {
1821  np->rx_ring[i].status = 0;
1822  skb = np->rx_skbuff[i];
1823  if (skb) {
1824  dma_unmap_single(&np->pci_dev->dev,
1825  le32_to_cpu(np->rx_ring[i].frag[0].addr),
1826  np->rx_buf_sz, DMA_FROM_DEVICE);
1827  dev_kfree_skb(skb);
1828  np->rx_skbuff[i] = NULL;
1829  }
1830  np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1831  }
1832  for (i = 0; i < TX_RING_SIZE; i++) {
1833  np->tx_ring[i].next_desc = 0;
1834  skb = np->tx_skbuff[i];
1835  if (skb) {
1836  dma_unmap_single(&np->pci_dev->dev,
1837  le32_to_cpu(np->tx_ring[i].frag[0].addr),
1838  skb->len, DMA_TO_DEVICE);
1839  dev_kfree_skb(skb);
1840  np->tx_skbuff[i] = NULL;
1841  }
1842  }
1843 
1844  return 0;
1845 }
1846 
1847 static void __devexit sundance_remove1 (struct pci_dev *pdev)
1848 {
1849  struct net_device *dev = pci_get_drvdata(pdev);
1850 
1851  if (dev) {
1852  struct netdev_private *np = netdev_priv(dev);
1853  unregister_netdev(dev);
1854  dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE,
1855  np->rx_ring, np->rx_ring_dma);
1856  dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
1857  np->tx_ring, np->tx_ring_dma);
1858  pci_iounmap(pdev, np->base);
1859  pci_release_regions(pdev);
1860  free_netdev(dev);
1861  pci_set_drvdata(pdev, NULL);
1862  }
1863 }
1864 
1865 #ifdef CONFIG_PM
1866 
1867 static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1868 {
1869  struct net_device *dev = pci_get_drvdata(pci_dev);
1870 
1871  if (!netif_running(dev))
1872  return 0;
1873 
1874  netdev_close(dev);
1875  netif_device_detach(dev);
1876 
1877  pci_save_state(pci_dev);
1878  pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1879 
1880  return 0;
1881 }
1882 
1883 static int sundance_resume(struct pci_dev *pci_dev)
1884 {
1885  struct net_device *dev = pci_get_drvdata(pci_dev);
1886  int err = 0;
1887 
1888  if (!netif_running(dev))
1889  return 0;
1890 
1891  pci_set_power_state(pci_dev, PCI_D0);
1892  pci_restore_state(pci_dev);
1893 
1894  err = netdev_open(dev);
1895  if (err) {
1896  printk(KERN_ERR "%s: Can't resume interface!\n",
1897  dev->name);
1898  goto out;
1899  }
1900 
1901  netif_device_attach(dev);
1902 
1903 out:
1904  return err;
1905 }
1906 
1907 #endif /* CONFIG_PM */
1908 
1909 static struct pci_driver sundance_driver = {
1910  .name = DRV_NAME,
1911  .id_table = sundance_pci_tbl,
1912  .probe = sundance_probe1,
1913  .remove = __devexit_p(sundance_remove1),
1914 #ifdef CONFIG_PM
1915  .suspend = sundance_suspend,
1916  .resume = sundance_resume,
1917 #endif /* CONFIG_PM */
1918 };
1919 
1920 static int __init sundance_init(void)
1921 {
1922 /* when a module, this is printed whether or not devices are found in probe */
1923 #ifdef MODULE
1924  printk(version);
1925 #endif
1926  return pci_register_driver(&sundance_driver);
1927 }
1928 
1929 static void __exit sundance_exit(void)
1930 {
1931  pci_unregister_driver(&sundance_driver);
1932 }
1933 
1934 module_init(sundance_init);
1935 module_exit(sundance_exit);
1936 
1937