winbond-840.c
1 /* winbond-840.c: A Linux PCI network adapter device driver. */
2 /*
3  Written 1998-2001 by Donald Becker.
4 
5  This software may be used and distributed according to the terms of
6  the GNU General Public License (GPL), incorporated herein by reference.
7  Drivers based on or derived from this code fall under the GPL and must
8  retain the authorship, copyright and license notice. This file is not
9  a complete program and may only be used when the entire operating
10  system is licensed under the GPL.
11 
12  The author may be reached as [email protected], or C/O
13  Scyld Computing Corporation
14  410 Severn Ave., Suite 210
15  Annapolis MD 21403
16 
17  Support and updates available at
18  http://www.scyld.com/network/drivers.html
19 
20  Do not remove the copyright information.
21  Do not change the version information unless an improvement has been made.
22  Merely removing my name, as Compex has done in the past, does not count
23  as an improvement.
24 
25  Changelog:
26  * ported to 2.4
27  ???
28  * spin lock update, memory barriers, new style dma mappings
29  limit each tx buffer to < 1024 bytes
30  remove DescIntr from Rx descriptors (that's a Tx flag)
31  remove next pointer from Tx descriptors
32  synchronize tx_q_bytes
33  software reset in tx_timeout
34  Copyright (C) 2000 Manfred Spraul
35  * further cleanups
36  power management.
37  support for big endian descriptors
38  Copyright (C) 2001 Manfred Spraul
39  * ethtool support (jgarzik)
40  * Replace some MII-related magic numbers with constants (jgarzik)
41 
42  TODO:
43  * enable pci_power_off
44  * Wake-On-LAN
45 */
46 
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48 
49 #define DRV_NAME "winbond-840"
50 #define DRV_VERSION "1.01-e"
51 #define DRV_RELDATE "Sep-11-2006"
52 
53 
54 /* Automatically extracted configuration info:
55 probe-func: winbond840_probe
56 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
57 
58 c-help-name: Winbond W89c840 PCI Ethernet support
59 c-help-symbol: CONFIG_WINBOND_840
60 c-help: This driver is for the Winbond W89c840 chip. It also works with
61 c-help: the TX9882 chip on the Compex RL100-ATX board.
62 c-help: More specific information and updates are available from
63 c-help: http://www.scyld.com/network/drivers.html
64 */
65 
66 /* The user-configurable values.
67  These may be modified when a driver module is loaded.*/
68 
69 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
70 static int max_interrupt_work = 20;
71 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
72  The '840 uses a 64 element hash table based on the Ethernet CRC. */
73 static int multicast_filter_limit = 32;
74 
75 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
76  Setting to > 1518 effectively disables this feature. */
77 static int rx_copybreak;
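/* For example: with rx_copybreak = 256, netdev_rx() copies frames shorter
   than 256 bytes into a freshly allocated skb and leaves the original ring
   buffer mapped for reuse; longer frames are passed up in place and the
   ring slot is refilled afterwards. */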
78 
79 /* Used to pass the media type, etc.
80  Both 'options[]' and 'full_duplex[]' should exist for driver
81  interoperability.
82  The media type is usually passed in 'options[]'.
83 */
84 #define MAX_UNITS 8 /* More are supported, limit only on options */
85 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87 
88 /* Operational parameters that are set at compile time. */
89 
90 /* Keep the ring sizes a power of two for compile efficiency.
91  The compiler will convert <unsigned>'%'<2^N> into a bit mask.
92  Making the Tx ring too large decreases the effectiveness of channel
93  bonding and packet priority.
94  There are no ill effects from too-large receive rings. */
95 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
96 #define TX_QUEUE_LEN_RESTART 5
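/* start_tx() stops the queue once more than TX_QUEUE_LEN descriptors are
   outstanding; netdev_tx_done() wakes it only after the backlog drops below
   TX_QUEUE_LEN_RESTART, giving some hysteresis against rapid stop/start. */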
97 
98 #define TX_BUFLIMIT (1024-128)
99 
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101  To avoid overflowing we don't queue again until we have room for a
102  full-size packet.
103  */
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
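/* With the values above this works out to 2048-1514-16 = 518 bytes: once
   tx_q_bytes exceeds 518, the FIFO may lack room for another maximum-sized
   (1514 byte) frame, so start_tx() stops the queue on chips flagged
   HasBrokenTx. */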
106 
107 
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
111 
112 /* Include files, designed to support most kernel versions 2.0.0 and later. */
113 #include <linux/module.h>
114 #include <linux/kernel.h>
115 #include <linux/string.h>
116 #include <linux/timer.h>
117 #include <linux/errno.h>
118 #include <linux/ioport.h>
119 #include <linux/interrupt.h>
120 #include <linux/pci.h>
121 #include <linux/dma-mapping.h>
122 #include <linux/netdevice.h>
123 #include <linux/etherdevice.h>
124 #include <linux/skbuff.h>
125 #include <linux/init.h>
126 #include <linux/delay.h>
127 #include <linux/ethtool.h>
128 #include <linux/mii.h>
129 #include <linux/rtnetlink.h>
130 #include <linux/crc32.h>
131 #include <linux/bitops.h>
132 #include <asm/uaccess.h>
133 #include <asm/processor.h> /* Processor type for cache alignment. */
134 #include <asm/io.h>
135 #include <asm/irq.h>
136 
137 #include "tulip.h"
138 
139 #undef PKT_BUF_SZ /* tulip.h also defines this */
140 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
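/* 1536 covers a maximum 1518-byte frame (including CRC) with some slack;
   presumably rounded to a multiple of 32 to stay cache-line friendly. */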
141 
142 /* These identify the driver base version and may not be removed. */
143 static const char version[] __initconst =
144  "v" DRV_VERSION " (2.4 port) "
145  DRV_RELDATE " Donald Becker <[email protected]>\n"
146  " http://www.scyld.com/network/drivers.html\n";
147 
148 MODULE_AUTHOR("Donald Becker <[email protected]>");
149 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_VERSION);
152 
153 module_param(max_interrupt_work, int, 0);
154 module_param(debug, int, 0);
155 module_param(rx_copybreak, int, 0);
156 module_param(multicast_filter_limit, int, 0);
157 module_param_array(options, int, NULL, 0);
158 module_param_array(full_duplex, int, NULL, 0);
159 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
160 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
161 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
162 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
163 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 9: full duplex");
164 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
165 
166 /*
167  Theory of Operation
168 
169 I. Board Compatibility
170 
171 This driver is for the Winbond w89c840 chip.
172 
173 II. Board-specific settings
174 
175 None.
176 
177 III. Driver operation
178 
179 This chip is very similar to the Digital 21*4* "Tulip" family. The first
180 twelve registers and the descriptor format are nearly identical. Read a
181 Tulip manual for operational details.
182 
183 A significant difference is that the multicast filter and station address are
184 stored in registers rather than loaded through a pseudo-transmit packet.
185 
186 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
187 full-sized packet we must use both data buffers in a descriptor. Thus the
188 driver uses ring mode where descriptors are implicitly sequential in memory,
189 rather than using the second descriptor address as a chain pointer to
190 subsequent descriptors.
191 
192 IV. Notes
193 
194 If you are going to almost clone a Tulip, why not go all the way and avoid
195 the need for a new driver?
196 
197 IVb. References
198 
199 http://www.scyld.com/expert/100mbps.html
200 http://www.scyld.com/expert/NWay.html
201 http://www.winbond.com.tw/
202 
203 IVc. Errata
204 
205 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
206 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
207 silent data corruption.
208 
209 Test with 'ping -s 10000' on a fast computer.
210 
211 */
212 
213 
214 
215 /*
216  PCI probe table.
217 */
218 enum chip_capability_flags {
219  CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
220 };
221 
222 static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
223  { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
224  { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
225  { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
226  { }
227 };
228 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
229 
230 enum {
231  netdev_res_size = 128, /* size of PCI BAR resource */
232 };
233 
234 struct pci_id_info {
235  const char *name;
236  int drv_flags; /* Driver use, intended as capability flags. */
237 };
238 
239 static const struct pci_id_info pci_id_tbl[] __devinitconst = {
240  { /* Sometimes a Level-One switch card. */
241  "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242  { "Winbond W89c840", CanHaveMII | HasBrokenTx},
243  { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
244  { } /* terminate list. */
245 };
246 
247 /* This driver was written to use PCI memory space, however some x86 systems
248  work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
249 */
250 
251 /* Offsets to the Command and Status Registers, "CSRs".
252  While similar to the Tulip, these registers are longword aligned.
253  Note: It's not useful to define symbolic names for every register bit in
254  the device. The name can only partially document the semantics and make
255  the driver longer and more difficult to read.
256 */
257 enum w840_offsets {
258  PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
259  RxRingPtr=0x0C, TxRingPtr=0x10,
260  IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
261  RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
262  CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
263  MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
264  CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
265 };
266 
267 /* Bits in the NetworkConfig register. */
268 enum rx_mode_bits {
269  AcceptErr=0x80,
270  RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
271  RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
272 };
273 
274 enum mii_reg_bits {
275  MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
276  MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
277 };
278 
279 /* The Tulip Rx and Tx buffer descriptors. */
280 struct w840_rx_desc {
281  s32 status;
282  s32 length;
283  u32 buffer1;
284  u32 buffer2;
285 };
286 
287 struct w840_tx_desc {
288  s32 status;
289  s32 length;
290  u32 buffer1, buffer2;
291 };
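/* Field layout, as used by the code below: the low 11 bits of 'length' hold
   the buffer1 byte count and bits 21:11 the buffer2 count (see start_tx(),
   which writes (len << 11) | TX_BUFLIMIT to split one frame across both
   buffers); on receive, bits 26:16 of 'status' carry the frame length,
   extracted in netdev_rx() as ((status >> 16) & 0x7ff) - 4. */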
292 
293 #define MII_CNT 1 /* winbond only supports one MII */
294 struct netdev_private {
295  struct w840_rx_desc *rx_ring;
296  dma_addr_t rx_addr[RX_RING_SIZE];
297  struct w840_tx_desc *tx_ring;
298  dma_addr_t tx_addr[TX_RING_SIZE];
299  dma_addr_t ring_dma_addr;
300  /* The addresses of receive-in-place skbuffs. */
301  struct sk_buff* rx_skbuff[RX_RING_SIZE];
302  /* The saved address of a sent-in-place packet/buffer, for later free(). */
303  struct sk_buff* tx_skbuff[TX_RING_SIZE];
304  struct net_device_stats stats;
305  struct timer_list timer; /* Media monitoring timer. */
306  /* Frequently used values: keep some adjacent for cache effect. */
307  spinlock_t lock;
308  int chip_id, drv_flags;
309  struct pci_dev *pci_dev;
310  int csr6;
311  struct w840_rx_desc *rx_head_desc;
312  unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
313  unsigned int rx_buf_sz; /* Based on MTU+slack. */
314  unsigned int cur_tx, dirty_tx;
315  unsigned int tx_q_bytes;
316  unsigned int tx_full; /* The Tx queue is full. */
317  /* MII transceiver section. */
318  int mii_cnt; /* MII device addresses. */
319  unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
320  u32 mii;
321  struct mii_if_info mii_if;
322  void __iomem *base_addr;
323 };
324 
325 static int eeprom_read(void __iomem *ioaddr, int location);
326 static int mdio_read(struct net_device *dev, int phy_id, int location);
327 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
328 static int netdev_open(struct net_device *dev);
329 static int update_link(struct net_device *dev);
330 static void netdev_timer(unsigned long data);
331 static void init_rxtx_rings(struct net_device *dev);
332 static void free_rxtx_rings(struct netdev_private *np);
333 static void init_registers(struct net_device *dev);
334 static void tx_timeout(struct net_device *dev);
335 static int alloc_ringdesc(struct net_device *dev);
336 static void free_ringdesc(struct netdev_private *np);
337 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
338 static irqreturn_t intr_handler(int irq, void *dev_instance);
339 static void netdev_error(struct net_device *dev, int intr_status);
340 static int netdev_rx(struct net_device *dev);
341 static u32 __set_rx_mode(struct net_device *dev);
342 static void set_rx_mode(struct net_device *dev);
343 static struct net_device_stats *get_stats(struct net_device *dev);
344 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
345 static const struct ethtool_ops netdev_ethtool_ops;
346 static int netdev_close(struct net_device *dev);
347 
348 static const struct net_device_ops netdev_ops = {
349  .ndo_open = netdev_open,
350  .ndo_stop = netdev_close,
351  .ndo_start_xmit = start_tx,
352  .ndo_get_stats = get_stats,
353  .ndo_set_rx_mode = set_rx_mode,
354  .ndo_do_ioctl = netdev_ioctl,
355  .ndo_tx_timeout = tx_timeout,
356  .ndo_change_mtu = eth_change_mtu,
357  .ndo_set_mac_address = eth_mac_addr,
358  .ndo_validate_addr = eth_validate_addr,
359 };
360 
361 static int __devinit w840_probe1 (struct pci_dev *pdev,
362  const struct pci_device_id *ent)
363 {
364  struct net_device *dev;
365  struct netdev_private *np;
366  static int find_cnt;
367  int chip_idx = ent->driver_data;
368  int irq;
369  int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
370  void __iomem *ioaddr;
371 
372  i = pci_enable_device(pdev);
373  if (i) return i;
374 
375  pci_set_master(pdev);
376 
377  irq = pdev->irq;
378 
379  if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
380  pr_warn("Device %s disabled due to DMA limitations\n",
381  pci_name(pdev));
382  return -EIO;
383  }
384  dev = alloc_etherdev(sizeof(*np));
385  if (!dev)
386  return -ENOMEM;
387  SET_NETDEV_DEV(dev, &pdev->dev);
388 
389  if (pci_request_regions(pdev, DRV_NAME))
390  goto err_out_netdev;
391 
392  ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
393  if (!ioaddr)
394  goto err_out_free_res;
395 
396  for (i = 0; i < 3; i++)
397  ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
398 
399  /* Reset the chip to erase previous misconfiguration.
400  No hold time required! */
401  iowrite32(0x00000001, ioaddr + PCIBusCfg);
402 
403  np = netdev_priv(dev);
404  np->pci_dev = pdev;
405  np->chip_id = chip_idx;
406  np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
407  spin_lock_init(&np->lock);
408  np->mii_if.dev = dev;
409  np->mii_if.mdio_read = mdio_read;
410  np->mii_if.mdio_write = mdio_write;
411  np->base_addr = ioaddr;
412 
413  pci_set_drvdata(pdev, dev);
414 
415  if (dev->mem_start)
416  option = dev->mem_start;
417 
418  /* The lower four bits are the media type. */
419  if (option > 0) {
420  if (option & 0x200)
421  np->mii_if.full_duplex = 1;
422  if (option & 15)
423  dev_info(&dev->dev,
424  "ignoring user supplied media type %d",
425  option & 15);
426  }
427  if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
428  np->mii_if.full_duplex = 1;
429 
430  if (np->mii_if.full_duplex)
431  np->mii_if.force_media = 1;
432 
433  /* The chip-specific entries in the device structure. */
434  dev->netdev_ops = &netdev_ops;
435  dev->ethtool_ops = &netdev_ethtool_ops;
436  dev->watchdog_timeo = TX_TIMEOUT;
437 
438  i = register_netdev(dev);
439  if (i)
440  goto err_out_cleardev;
441 
442  dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
443  pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
444 
445  if (np->drv_flags & CanHaveMII) {
446  int phy, phy_idx = 0;
447  for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
448  int mii_status = mdio_read(dev, phy, MII_BMSR);
449  if (mii_status != 0xffff && mii_status != 0x0000) {
450  np->phys[phy_idx++] = phy;
451  np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
452  np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
453  mdio_read(dev, phy, MII_PHYSID2);
454  dev_info(&dev->dev,
455  "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
456  np->mii, phy, mii_status,
457  np->mii_if.advertising);
458  }
459  }
460  np->mii_cnt = phy_idx;
461  np->mii_if.phy_id = np->phys[0];
462  if (phy_idx == 0) {
463  dev_warn(&dev->dev,
464  "MII PHY not found -- this device may not operate correctly\n");
465  }
466  }
467 
468  find_cnt++;
469  return 0;
470 
471 err_out_cleardev:
472  pci_set_drvdata(pdev, NULL);
473  pci_iounmap(pdev, ioaddr);
474 err_out_free_res:
475  pci_release_regions(pdev);
476 err_out_netdev:
477  free_netdev (dev);
478  return -ENODEV;
479 }
480 
481 
482 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
483  often serial bit streams generated by the host processor.
484  The example below is for the common 93c46 EEPROM, 64 16 bit words. */
485 
486 /* Delay between EEPROM clock transitions.
487  No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
488  a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
489  made udelay() unreliable.
490  The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
491  deprecated.
492 */
493 #define eeprom_delay(ee_addr) ioread32(ee_addr)
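/* The dummy ioread32() both flushes the posted write and costs one PCI read
   cycle, which at 33 MHz is presumably enough settling time between clock
   transitions. */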
494 
495 enum EEPROM_Ctrl_Bits {
496  EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
497  EE_ChipSelect=0x801, EE_DataIn=0x08,
498 };
499 
500 /* The EEPROM commands include the always-set leading bit. */
501 enum EEPROM_Cmds {
502  EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
503 };
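/* Example: reading EEPROM word 2 shifts out the 11 bits of
   read_cmd = 2 | EE_ReadCmd, MSB first: two leading zeros, then '110'
   (the start bit plus the read opcode), then the 6-bit address 000010
   (see eeprom_read() below). */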
504 
505 static int eeprom_read(void __iomem *addr, int location)
506 {
507  int i;
508  int retval = 0;
509  void __iomem *ee_addr = addr + EECtrl;
510  int read_cmd = location | EE_ReadCmd;
511  iowrite32(EE_ChipSelect, ee_addr);
512 
513  /* Shift the read command bits out. */
514  for (i = 10; i >= 0; i--) {
515  short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
516  iowrite32(dataval, ee_addr);
517  eeprom_delay(ee_addr);
518  iowrite32(dataval | EE_ShiftClk, ee_addr);
519  eeprom_delay(ee_addr);
520  }
521  iowrite32(EE_ChipSelect, ee_addr);
522  eeprom_delay(ee_addr);
523 
524  for (i = 16; i > 0; i--) {
525  iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
526  eeprom_delay(ee_addr);
527  retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
528  iowrite32(EE_ChipSelect, ee_addr);
529  eeprom_delay(ee_addr);
530  }
531 
532  /* Terminate the EEPROM access. */
533  iowrite32(0, ee_addr);
534  return retval;
535 }
536 
537 /* MII transceiver control section.
538  Read and write the MII registers using software-generated serial
539  MDIO protocol. See the MII specifications or DP83840A data sheet
540  for details.
541 
542  The maximum data clock rate is 2.5 MHz. The minimum timing is usually
543  met by back-to-back 33 MHz PCI cycles. */
544 #define mdio_delay(mdio_addr) ioread32(mdio_addr)
545 
546 /* Set iff a MII transceiver on any interface requires mdio preamble.
547  This is only set for older transceivers, so the extra
548  code size of a per-interface flag is not worthwhile. */
549 static char mii_preamble_required = 1;
550 
551 #define MDIO_WRITE0 (MDIO_EnbOutput)
552 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
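/* Decoding the command words used below (an observation, not from the
   original comments): in mdio_read() the 16 bits shifted out of
   (0xf6 << 10) begin with two filler ones, the clause-22 start code '01'
   and the read opcode '10', followed by the 5-bit PHY and register
   addresses; in mdio_write() the 32-bit frame's 0x5002 term supplies the
   start '01', write opcode '01' and the '10' turnaround, with the data
   word in the low 16 bits. */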
553 
554 /* Generate the preamble required for initial synchronization and
555  a few older transceivers. */
556 static void mdio_sync(void __iomem *mdio_addr)
557 {
558  int bits = 32;
559 
560  /* Establish sync by sending at least 32 logic ones. */
561  while (--bits >= 0) {
562  iowrite32(MDIO_WRITE1, mdio_addr);
563  mdio_delay(mdio_addr);
564  iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
565  mdio_delay(mdio_addr);
566  }
567 }
568 
569 static int mdio_read(struct net_device *dev, int phy_id, int location)
570 {
571  struct netdev_private *np = netdev_priv(dev);
572  void __iomem *mdio_addr = np->base_addr + MIICtrl;
573  int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
574  int i, retval = 0;
575 
576  if (mii_preamble_required)
577  mdio_sync(mdio_addr);
578 
579  /* Shift the read command bits out. */
580  for (i = 15; i >= 0; i--) {
581  int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
582 
583  iowrite32(dataval, mdio_addr);
584  mdio_delay(mdio_addr);
585  iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
586  mdio_delay(mdio_addr);
587  }
588  /* Read the two transition, 16 data, and wire-idle bits. */
589  for (i = 20; i > 0; i--) {
590  iowrite32(MDIO_EnbIn, mdio_addr);
591  mdio_delay(mdio_addr);
592  retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
593  iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
594  mdio_delay(mdio_addr);
595  }
596  return (retval>>1) & 0xffff;
597 }
598 
599 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
600 {
601  struct netdev_private *np = netdev_priv(dev);
602  void __iomem *mdio_addr = np->base_addr + MIICtrl;
603  int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
604  int i;
605 
606  if (location == 4 && phy_id == np->phys[0])
607  np->mii_if.advertising = value;
608 
609  if (mii_preamble_required)
610  mdio_sync(mdio_addr);
611 
612  /* Shift the command bits out. */
613  for (i = 31; i >= 0; i--) {
614  int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
615 
616  iowrite32(dataval, mdio_addr);
617  mdio_delay(mdio_addr);
618  iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
619  mdio_delay(mdio_addr);
620  }
621  /* Clear out extra bits. */
622  for (i = 2; i > 0; i--) {
623  iowrite32(MDIO_EnbIn, mdio_addr);
624  mdio_delay(mdio_addr);
625  iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
626  mdio_delay(mdio_addr);
627  }
628 }
629 
630 
631 static int netdev_open(struct net_device *dev)
632 {
633  struct netdev_private *np = netdev_priv(dev);
634  void __iomem *ioaddr = np->base_addr;
635  const int irq = np->pci_dev->irq;
636  int i;
637 
638  iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
639 
640  netif_device_detach(dev);
641  i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
642  if (i)
643  goto out_err;
644 
645  if (debug > 1)
646  netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
647 
648  if((i=alloc_ringdesc(dev)))
649  goto out_err;
650 
651  spin_lock_irq(&np->lock);
652  netif_device_attach(dev);
653  init_registers(dev);
654  spin_unlock_irq(&np->lock);
655 
656  netif_start_queue(dev);
657  if (debug > 2)
658  netdev_dbg(dev, "Done netdev_open()\n");
659 
660  /* Set the timer to check for link beat. */
661  init_timer(&np->timer);
662  np->timer.expires = jiffies + 1*HZ;
663  np->timer.data = (unsigned long)dev;
664  np->timer.function = netdev_timer; /* timer handler */
665  add_timer(&np->timer);
666  return 0;
667 out_err:
668  netif_device_attach(dev);
669  return i;
670 }
671 
672 #define MII_DAVICOM_DM9101 0x0181b800
673 
674 static int update_link(struct net_device *dev)
675 {
676  struct netdev_private *np = netdev_priv(dev);
677  int duplex, fasteth, result, mii_reg;
678 
679  /* BMSR */
680  mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
681 
682  if (mii_reg == 0xffff)
683  return np->csr6;
684  /* reread: the link status bit is sticky */
685  mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
686  if (!(mii_reg & 0x4)) {
687  if (netif_carrier_ok(dev)) {
688  if (debug)
689  dev_info(&dev->dev,
690  "MII #%d reports no link. Disabling watchdog\n",
691  np->phys[0]);
692  netif_carrier_off(dev);
693  }
694  return np->csr6;
695  }
696  if (!netif_carrier_ok(dev)) {
697  if (debug)
698  dev_info(&dev->dev,
699  "MII #%d link is back. Enabling watchdog\n",
700  np->phys[0]);
701  netif_carrier_on(dev);
702  }
703 
704  if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
705  /* If the link partner doesn't support autonegotiation
706  * the MII detects its abilities with the "parallel detection".
707  * Some MIIs update the LPA register to the result of the parallel
708  * detection, some don't.
709  * The Davicom PHY [at least 0181b800] doesn't.
710  * Instead bits 9 and 13 of the BMCR are updated to the result
711  * of the negotiation.
712  */
713  mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
714  duplex = mii_reg & BMCR_FULLDPLX;
715  fasteth = mii_reg & BMCR_SPEED100;
716  } else {
717  int negotiated;
718  mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
719  negotiated = mii_reg & np->mii_if.advertising;
720 
721  duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
722  fasteth = negotiated & 0x380;
723  }
724  duplex |= np->mii_if.force_media;
725  /* clear the fast-ethernet and full-duplex bits */
726  result = np->csr6 & ~0x20000200;
727  if (duplex)
728  result |= 0x200;
729  if (fasteth)
730  result |= 0x20000000;
731  if (result != np->csr6 && debug)
732  dev_info(&dev->dev,
733  "Setting %dMBit-%s-duplex based on MII#%d\n",
734  fasteth ? 100 : 10, duplex ? "full" : "half",
735  np->phys[0]);
736  return result;
737 }
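/* csr6 (NetworkConfig) bits used by this driver: 0x200 selects full duplex,
   0x20000000 selects 100 Mbit, 0x2002 turns the Tx and Rx processes on, and
   bits 20:14 hold the Tx FIFO threshold that netdev_error() raises after a
   transmit underflow. */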
738 
739 #define RXTX_TIMEOUT 2000
740 static inline void update_csr6(struct net_device *dev, int new)
741 {
742  struct netdev_private *np = netdev_priv(dev);
743  void __iomem *ioaddr = np->base_addr;
744  int limit = RXTX_TIMEOUT;
745 
746  if (!netif_device_present(dev))
747  new = 0;
748  if (new==np->csr6)
749  return;
750  /* stop both Tx and Rx processes */
751  iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
752  /* wait until they have really stopped */
753  for (;;) {
754  int csr5 = ioread32(ioaddr + IntrStatus);
755  int t;
756 
757  t = (csr5 >> 17) & 0x07;
758  if (t==0||t==1) {
759  /* rx stopped */
760  t = (csr5 >> 20) & 0x07;
761  if (t==0||t==1)
762  break;
763  }
764 
765  limit--;
766  if(!limit) {
767  dev_info(&dev->dev,
768  "couldn't stop rxtx, IntrStatus %xh\n", csr5);
769  break;
770  }
771  udelay(1);
772  }
773  np->csr6 = new;
774  /* and restart them with the new configuration */
775  iowrite32(np->csr6, ioaddr + NetworkConfig);
776  if (new & 0x200)
777  np->mii_if.full_duplex = 1;
778 }
779 
780 static void netdev_timer(unsigned long data)
781 {
782  struct net_device *dev = (struct net_device *)data;
783  struct netdev_private *np = netdev_priv(dev);
784  void __iomem *ioaddr = np->base_addr;
785 
786  if (debug > 2)
787  netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
788  ioread32(ioaddr + IntrStatus),
789  ioread32(ioaddr + NetworkConfig));
790  spin_lock_irq(&np->lock);
791  update_csr6(dev, update_link(dev));
792  spin_unlock_irq(&np->lock);
793  np->timer.expires = jiffies + 10*HZ;
794  add_timer(&np->timer);
795 }
796 
797 static void init_rxtx_rings(struct net_device *dev)
798 {
799  struct netdev_private *np = netdev_priv(dev);
800  int i;
801 
802  np->rx_head_desc = &np->rx_ring[0];
803  np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
804 
805  /* Initialize all Rx descriptors. */
806  for (i = 0; i < RX_RING_SIZE; i++) {
807  np->rx_ring[i].length = np->rx_buf_sz;
808  np->rx_ring[i].status = 0;
809  np->rx_skbuff[i] = NULL;
810  }
811  /* Mark the last entry as wrapping the ring. */
812  np->rx_ring[i-1].length |= DescEndRing;
813 
814  /* Fill in the Rx buffers. Handle allocation failure gracefully. */
815  for (i = 0; i < RX_RING_SIZE; i++) {
816  struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
817  np->rx_skbuff[i] = skb;
818  if (skb == NULL)
819  break;
820  np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
821  np->rx_buf_sz,PCI_DMA_FROMDEVICE);
822 
823  np->rx_ring[i].buffer1 = np->rx_addr[i];
824  np->rx_ring[i].status = DescOwned;
825  }
826 
827  np->cur_rx = 0;
828  np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
829 
830  /* Initialize the Tx descriptors */
831  for (i = 0; i < TX_RING_SIZE; i++) {
832  np->tx_skbuff[i] = NULL;
833  np->tx_ring[i].status = 0;
834  }
835  np->tx_full = 0;
836  np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
837 
837 
838  iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
839  iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
840  np->base_addr + TxRingPtr);
841 
842 }
843 
844 static void free_rxtx_rings(struct netdev_private* np)
845 {
846  int i;
847  /* Free all the skbuffs in the Rx queue. */
848  for (i = 0; i < RX_RING_SIZE; i++) {
849  np->rx_ring[i].status = 0;
850  if (np->rx_skbuff[i]) {
851  pci_unmap_single(np->pci_dev,
852  np->rx_addr[i],
853  np->rx_skbuff[i]->len,
854  PCI_DMA_FROMDEVICE);
855  dev_kfree_skb(np->rx_skbuff[i]);
856  }
857  np->rx_skbuff[i] = NULL;
858  }
859  for (i = 0; i < TX_RING_SIZE; i++) {
860  if (np->tx_skbuff[i]) {
861  pci_unmap_single(np->pci_dev,
862  np->tx_addr[i],
863  np->tx_skbuff[i]->len,
864  PCI_DMA_TODEVICE);
865  dev_kfree_skb(np->tx_skbuff[i]);
866  }
867  np->tx_skbuff[i] = NULL;
868  }
869 }
870 
871 static void init_registers(struct net_device *dev)
872 {
873  struct netdev_private *np = netdev_priv(dev);
874  void __iomem *ioaddr = np->base_addr;
875  int i;
876 
877  for (i = 0; i < 6; i++)
878  iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
879 
880  /* Initialize other registers. */
881 #ifdef __BIG_ENDIAN
882  i = (1<<20); /* Big-endian descriptors */
883 #else
884  i = 0;
885 #endif
886  i |= (0x04<<2); /* skip length 4 u32 */
887  i |= 0x02; /* give Rx priority */
888 
889  /* Configure the PCI bus bursts and FIFO thresholds.
890  486: Set 8 longword cache alignment, 8 longword burst.
891  586: Set 16 longword cache alignment, no burst limit.
892  Cache alignment bits 15:14 Burst length 13:8
893  0000 <not allowed> 0000 align to cache 0800 8 longwords
894  4000 8 longwords 0100 1 longword 1000 16 longwords
895  8000 16 longwords 0200 2 longwords 2000 32 longwords
896  C000 32 longwords 0400 4 longwords */
897 
898 #if defined (__i386__) && !defined(MODULE)
899  /* When not a module we can work around broken '486 PCI boards. */
900  if (boot_cpu_data.x86 <= 4) {
901  i |= 0x4800;
902  dev_info(&dev->dev,
903  "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
904  } else {
905  i |= 0xE000;
906  }
907 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
908  i |= 0xE000;
909 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
910  i |= 0x4800;
911 #else
912 #warning Processor architecture undefined
913  i |= 0x4800;
914 #endif
915  iowrite32(i, ioaddr + PCIBusCfg);
916 
917  np->csr6 = 0;
918  /* 128 byte Tx threshold;
919  Transmit on; Receive on; */
920  update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
921 
922  /* Clear and Enable interrupts by setting the interrupt mask. */
923  iowrite32(0x1A0F5, ioaddr + IntrStatus);
924  iowrite32(0x1A0F5, ioaddr + IntrEnable);
925 
926  iowrite32(0, ioaddr + RxStartDemand);
927 }
928 
929 static void tx_timeout(struct net_device *dev)
930 {
931  struct netdev_private *np = netdev_priv(dev);
932  void __iomem *ioaddr = np->base_addr;
933  const int irq = np->pci_dev->irq;
934 
935  dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
936  ioread32(ioaddr + IntrStatus));
937 
938  {
939  int i;
940  printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
941  for (i = 0; i < RX_RING_SIZE; i++)
942  printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
943  printk(KERN_CONT "\n");
944  printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
945  for (i = 0; i < TX_RING_SIZE; i++)
946  printk(KERN_CONT " %08x", np->tx_ring[i].status);
947  printk(KERN_CONT "\n");
948  }
949  printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
950  np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
951  printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
952 
953  disable_irq(irq);
954  spin_lock_irq(&np->lock);
955  /*
956  * Under high load dirty_tx and the internal tx descriptor pointer
957  * come out of sync, thus perform a software reset and reinitialize
958  * everything.
959  */
960 
961  iowrite32(1, np->base_addr+PCIBusCfg);
962  udelay(1);
963 
964  free_rxtx_rings(np);
965  init_rxtx_rings(dev);
966  init_registers(dev);
967  spin_unlock_irq(&np->lock);
968  enable_irq(irq);
969 
970  netif_wake_queue(dev);
971  dev->trans_start = jiffies; /* prevent tx timeout */
972  np->stats.tx_errors++;
973 }
974 
975 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
976 static int alloc_ringdesc(struct net_device *dev)
977 {
978  struct netdev_private *np = netdev_priv(dev);
979 
980  np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
981 
982  np->rx_ring = pci_alloc_consistent(np->pci_dev,
983  sizeof(struct w840_rx_desc)*RX_RING_SIZE +
984  sizeof(struct w840_tx_desc)*TX_RING_SIZE,
985  &np->ring_dma_addr);
986  if(!np->rx_ring)
987  return -ENOMEM;
988  init_rxtx_rings(dev);
989  return 0;
990 }
991 
992 static void free_ringdesc(struct netdev_private *np)
993 {
994  pci_free_consistent(np->pci_dev,
995  sizeof(struct w840_rx_desc)*RX_RING_SIZE +
996  sizeof(struct w840_tx_desc)*TX_RING_SIZE,
997  np->rx_ring, np->ring_dma_addr);
998 
999 }
1000 
1001 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1002 {
1003  struct netdev_private *np = netdev_priv(dev);
1004  unsigned entry;
1005 
1006  /* Caution: the write order is important here, set the field
1007  with the "ownership" bits last. */
1008 
1009  /* Calculate the next Tx descriptor entry. */
1010  entry = np->cur_tx % TX_RING_SIZE;
1011 
1012  np->tx_addr[entry] = pci_map_single(np->pci_dev,
1013  skb->data,skb->len, PCI_DMA_TODEVICE);
1014  np->tx_skbuff[entry] = skb;
1015 
1016  np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1017  if (skb->len < TX_BUFLIMIT) {
1018  np->tx_ring[entry].length = DescWholePkt | skb->len;
1019  } else {
1020  int len = skb->len - TX_BUFLIMIT;
1021 
1022  np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1023  np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1024  }
1025  if(entry == TX_RING_SIZE-1)
1026  np->tx_ring[entry].length |= DescEndRing;
1027 
1028  /* Now acquire the irq spinlock.
1029  * The difficult race is the ordering between
1030  * increasing np->cur_tx and setting DescOwned:
1031  * - if np->cur_tx is increased first the interrupt
1032  * handler could consider the packet as transmitted
1033  * since DescOwned is cleared.
1034  * - If DescOwned is set first the NIC could report the
1035  * packet as sent, but the interrupt handler would ignore it
1036  * since the np->cur_tx was not yet increased.
1037  */
1038  spin_lock_irq(&np->lock);
1039  np->cur_tx++;
1040 
1041  wmb(); /* flush length, buffer1, buffer2 */
1042  np->tx_ring[entry].status = DescOwned;
1043  wmb(); /* flush status and kick the hardware */
1044  iowrite32(0, np->base_addr + TxStartDemand);
1045  np->tx_q_bytes += skb->len;
1046  /* Work around horrible bug in the chip by marking the queue as full
1047  when we do not have FIFO room for a maximum sized packet. */
1048  if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1049  ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1050  netif_stop_queue(dev);
1051  wmb();
1052  np->tx_full = 1;
1053  }
1054  spin_unlock_irq(&np->lock);
1055 
1056  if (debug > 4) {
1057  netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1058  np->cur_tx, entry);
1059  }
1060  return NETDEV_TX_OK;
1061 }
1062 
1063 static void netdev_tx_done(struct net_device *dev)
1064 {
1065  struct netdev_private *np = netdev_priv(dev);
1066  for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1067  int entry = np->dirty_tx % TX_RING_SIZE;
1068  int tx_status = np->tx_ring[entry].status;
1069 
1070  if (tx_status < 0)
1071  break;
1072  if (tx_status & 0x8000) { /* There was an error, log it. */
1073 #ifndef final_version
1074  if (debug > 1)
1075  netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1076  tx_status);
1077 #endif
1078  np->stats.tx_errors++;
1079  if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1080  if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1081  if (tx_status & 0x0200) np->stats.tx_window_errors++;
1082  if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1083  if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1084  np->stats.tx_heartbeat_errors++;
1085  } else {
1086 #ifndef final_version
1087  if (debug > 3)
1088  netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1089  entry, tx_status);
1090 #endif
1091  np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1092  np->stats.collisions += (tx_status >> 3) & 15;
1093  np->stats.tx_packets++;
1094  }
1095  /* Free the original skb. */
1096  pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1097  np->tx_skbuff[entry]->len,
1098  PCI_DMA_TODEVICE);
1099  np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1100  dev_kfree_skb_irq(np->tx_skbuff[entry]);
1101  np->tx_skbuff[entry] = NULL;
1102  }
1103  if (np->tx_full &&
1104  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1105  np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1106  /* The ring is no longer full, clear tbusy. */
1107  np->tx_full = 0;
1108  wmb();
1109  netif_wake_queue(dev);
1110  }
1111 }
1112 
1113 /* The interrupt handler does all of the Rx thread work and cleans up
1114  after the Tx thread. */
1115 static irqreturn_t intr_handler(int irq, void *dev_instance)
1116 {
1117  struct net_device *dev = (struct net_device *)dev_instance;
1118  struct netdev_private *np = netdev_priv(dev);
1119  void __iomem *ioaddr = np->base_addr;
1120  int work_limit = max_interrupt_work;
1121  int handled = 0;
1122 
1123  if (!netif_device_present(dev))
1124  return IRQ_NONE;
1125  do {
1126  u32 intr_status = ioread32(ioaddr + IntrStatus);
1127 
1128  /* Acknowledge all of the current interrupt sources ASAP. */
1129  iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1130 
1131  if (debug > 4)
1132  netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1133 
1134  if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1135  break;
1136 
1137  handled = 1;
1138 
1139  if (intr_status & (RxIntr | RxNoBuf))
1140  netdev_rx(dev);
1141  if (intr_status & RxNoBuf)
1142  iowrite32(0, ioaddr + RxStartDemand);
1143 
1144  if (intr_status & (TxNoBuf | TxIntr) &&
1145  np->cur_tx != np->dirty_tx) {
1146  spin_lock(&np->lock);
1147  netdev_tx_done(dev);
1148  spin_unlock(&np->lock);
1149  }
1150 
1151  /* Abnormal error summary/uncommon events handlers. */
1152  if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
1153  TimerInt | TxDied))
1154  netdev_error(dev, intr_status);
1155 
1156  if (--work_limit < 0) {
1157  dev_warn(&dev->dev,
1158  "Too much work at interrupt, status=0x%04x\n",
1159  intr_status);
1160  /* Set the timer to re-enable the other interrupts after
1161  10*82usec ticks. */
1162  spin_lock(&np->lock);
1163  if (netif_device_present(dev)) {
1164  iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1165  iowrite32(10, ioaddr + GPTimer);
1166  }
1167  spin_unlock(&np->lock);
1168  break;
1169  }
1170  } while (1);
1171 
1172  if (debug > 3)
1173  netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1174  ioread32(ioaddr + IntrStatus));
1175  return IRQ_RETVAL(handled);
1176 }
1177 
1178 /* This routine is logically part of the interrupt handler, but separated
1179  for clarity and better register allocation. */
1180 static int netdev_rx(struct net_device *dev)
1181 {
1182  struct netdev_private *np = netdev_priv(dev);
1183  int entry = np->cur_rx % RX_RING_SIZE;
1184  int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1185 
1186  if (debug > 4) {
1187  netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1188  entry, np->rx_ring[entry].status);
1189  }
1190 
1191  /* If EOP is set on the next entry, it's a new packet. Send it up. */
1192  while (--work_limit >= 0) {
1193  struct w840_rx_desc *desc = np->rx_head_desc;
1194  s32 status = desc->status;
1195 
1196  if (debug > 4)
1197  netdev_dbg(dev, " netdev_rx() status was %08x\n",
1198  status);
1199  if (status < 0)
1200  break;
1201  if ((status & 0x38008300) != 0x0300) {
1202  if ((status & 0x38000300) != 0x0300) {
1203  /* Ignore earlier buffers. */
1204  if ((status & 0xffff) != 0x7fff) {
1205  dev_warn(&dev->dev,
1206  "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1207  np->cur_rx, status);
1208  np->stats.rx_length_errors++;
1209  }
1210  } else if (status & 0x8000) {
1211  /* There was a fatal error. */
1212  if (debug > 2)
1213  netdev_dbg(dev, "Receive error, Rx status %08x\n",
1214  status);
1215  np->stats.rx_errors++; /* end of a packet.*/
1216  if (status & 0x0890) np->stats.rx_length_errors++;
1217  if (status & 0x004C) np->stats.rx_frame_errors++;
1218  if (status & 0x0002) np->stats.rx_crc_errors++;
1219  }
1220  } else {
1221  struct sk_buff *skb;
1222  /* Omit the four octet CRC from the length. */
1223  int pkt_len = ((status >> 16) & 0x7ff) - 4;
1224 
1225 #ifndef final_version
1226  if (debug > 4)
1227  netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1228  pkt_len, status);
1229 #endif
1230  /* Check if the packet is long enough to accept without copying
1231  to a minimally-sized skbuff. */
1232  if (pkt_len < rx_copybreak &&
1233  (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1234  skb_reserve(skb, 2); /* 16 byte align the IP header */
1235  pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1236  np->rx_skbuff[entry]->len,
1237  PCI_DMA_FROMDEVICE);
1238  skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1239  skb_put(skb, pkt_len);
1240  pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1241  np->rx_skbuff[entry]->len,
1242  PCI_DMA_FROMDEVICE);
1243  } else {
1244  pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1245  np->rx_skbuff[entry]->len,
1246  PCI_DMA_FROMDEVICE);
1247  skb_put(skb = np->rx_skbuff[entry], pkt_len);
1248  np->rx_skbuff[entry] = NULL;
1249  }
1250 #ifndef final_version /* Remove after testing. */
1251  /* You will want this info for the initial debug. */
1252  if (debug > 5)
1253  netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1254  &skb->data[0], &skb->data[6],
1255  skb->data[12], skb->data[13],
1256  &skb->data[14]);
1257 #endif
1258  skb->protocol = eth_type_trans(skb, dev);
1259  netif_rx(skb);
1260  np->stats.rx_packets++;
1261  np->stats.rx_bytes += pkt_len;
1262  }
1263  entry = (++np->cur_rx) % RX_RING_SIZE;
1264  np->rx_head_desc = &np->rx_ring[entry];
1265  }
1266 
1267  /* Refill the Rx ring buffers. */
1268  for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1269  struct sk_buff *skb;
1270  entry = np->dirty_rx % RX_RING_SIZE;
1271  if (np->rx_skbuff[entry] == NULL) {
1272  skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1273  np->rx_skbuff[entry] = skb;
1274  if (skb == NULL)
1275  break; /* Better luck next round. */
1276  np->rx_addr[entry] = pci_map_single(np->pci_dev,
1277  skb->data,
1278  np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1279  np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1280  }
1281  wmb();
1282  np->rx_ring[entry].status = DescOwned;
1283  }
1284 
1285  return 0;
1286 }
1287 
1288 static void netdev_error(struct net_device *dev, int intr_status)
1289 {
1290  struct netdev_private *np = netdev_priv(dev);
1291  void __iomem *ioaddr = np->base_addr;
1292 
1293  if (debug > 2)
1294  netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1295  if (intr_status == 0xffffffff)
1296  return;
1297  spin_lock(&np->lock);
1298  if (intr_status & TxFIFOUnderflow) {
1299  int new;
1300  /* Bump up the Tx threshold */
1301 #if 0
1302  /* This causes lots of dropped packets,
1303  * and under high load even tx_timeouts
1304  */
1305  new = np->csr6 + 0x4000;
1306 #else
1307  new = (np->csr6 >> 14)&0x7f;
1308  if (new < 64)
1309  new *= 2;
1310  else
1311  new = 127; /* load full packet before starting */
1312  new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1313 #endif
1314  netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1315  update_csr6(dev, new);
1316  }
1317  if (intr_status & RxDied) { /* Missed a Rx frame. */
1318  np->stats.rx_errors++;
1319  }
1320  if (intr_status & TimerInt) {
1321  /* Re-enable other interrupts. */
1322  if (netif_device_present(dev))
1323  iowrite32(0x1A0F5, ioaddr + IntrEnable);
1324  }
1325  np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1326  iowrite32(0, ioaddr + RxStartDemand);
1327  spin_unlock(&np->lock);
1328 }
1329 
1330 static struct net_device_stats *get_stats(struct net_device *dev)
1331 {
1332  struct netdev_private *np = netdev_priv(dev);
1333  void __iomem *ioaddr = np->base_addr;
1334 
1335  /* The chip only needs to report frames it silently dropped. */
1336  spin_lock_irq(&np->lock);
1337  if (netif_running(dev) && netif_device_present(dev))
1338  np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1339  spin_unlock_irq(&np->lock);
1340 
1341  return &np->stats;
1342 }
1343 
1344 
1345 static u32 __set_rx_mode(struct net_device *dev)
1346 {
1347  struct netdev_private *np = netdev_priv(dev);
1348  void __iomem *ioaddr = np->base_addr;
1349  u32 mc_filter[2]; /* Multicast hash filter */
1350  u32 rx_mode;
1351 
1352  if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1353  memset(mc_filter, 0xff, sizeof(mc_filter));
1354  rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1355  | AcceptMyPhys;
1356  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1357  (dev->flags & IFF_ALLMULTI)) {
1358  /* Too many to match, or accept all multicasts. */
1359  memset(mc_filter, 0xff, sizeof(mc_filter));
1360  rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1361  } else {
1362  struct netdev_hw_addr *ha;
1363 
1364  memset(mc_filter, 0, sizeof(mc_filter));
1365  netdev_for_each_mc_addr(ha, dev) {
1366  int filbit;
1367 
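 /* Hash on the inverted top six bits of the Ethernet CRC; the two
    mc_filter words form the 64-bit filter table written to
    MulticastFilter0/1 below. */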
1368  filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1369  filbit &= 0x3f;
1370  mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1371  }
1372  rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1373  }
1374  iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1375  iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1376  return rx_mode;
1377 }
1378 
1379 static void set_rx_mode(struct net_device *dev)
1380 {
1381  struct netdev_private *np = netdev_priv(dev);
1382  u32 rx_mode = __set_rx_mode(dev);
1383  spin_lock_irq(&np->lock);
1384  update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1385  spin_unlock_irq(&np->lock);
1386 }
1387 
1388 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1389 {
1390  struct netdev_private *np = netdev_priv(dev);
1391 
1392  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1393  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1394  strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1395 }
1396 
1397 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1398 {
1399  struct netdev_private *np = netdev_priv(dev);
1400  int rc;
1401 
1402  spin_lock_irq(&np->lock);
1403  rc = mii_ethtool_gset(&np->mii_if, cmd);
1404  spin_unlock_irq(&np->lock);
1405 
1406  return rc;
1407 }
1408 
1409 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1410 {
1411  struct netdev_private *np = netdev_priv(dev);
1412  int rc;
1413 
1414  spin_lock_irq(&np->lock);
1415  rc = mii_ethtool_sset(&np->mii_if, cmd);
1416  spin_unlock_irq(&np->lock);
1417 
1418  return rc;
1419 }
1420 
1421 static int netdev_nway_reset(struct net_device *dev)
1422 {
1423  struct netdev_private *np = netdev_priv(dev);
1424  return mii_nway_restart(&np->mii_if);
1425 }
1426 
1427 static u32 netdev_get_link(struct net_device *dev)
1428 {
1429  struct netdev_private *np = netdev_priv(dev);
1430  return mii_link_ok(&np->mii_if);
1431 }
1432 
1433 static u32 netdev_get_msglevel(struct net_device *dev)
1434 {
1435  return debug;
1436 }
1437 
1438 static void netdev_set_msglevel(struct net_device *dev, u32 value)
1439 {
1440  debug = value;
1441 }
1442 
1443 static const struct ethtool_ops netdev_ethtool_ops = {
1444  .get_drvinfo = netdev_get_drvinfo,
1445  .get_settings = netdev_get_settings,
1446  .set_settings = netdev_set_settings,
1447  .nway_reset = netdev_nway_reset,
1448  .get_link = netdev_get_link,
1449  .get_msglevel = netdev_get_msglevel,
1450  .set_msglevel = netdev_set_msglevel,
1451 };
1452 
1453 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1454 {
1455  struct mii_ioctl_data *data = if_mii(rq);
1456  struct netdev_private *np = netdev_priv(dev);
1457 
1458  switch(cmd) {
1459  case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1460  data->phy_id = np->phys[0] & 0x1f; /* np is already netdev_priv(dev) */
1461  /* Fall Through */
1462 
1463  case SIOCGMIIREG: /* Read MII PHY register. */
1464  spin_lock_irq(&np->lock);
1465  data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1466  spin_unlock_irq(&np->lock);
1467  return 0;
1468 
1469  case SIOCSMIIREG: /* Write MII PHY register. */
1470  spin_lock_irq(&np->lock);
1471  mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1472  spin_unlock_irq(&np->lock);
1473  return 0;
1474  default:
1475  return -EOPNOTSUPP;
1476  }
1477 }
1478 
1479 static int netdev_close(struct net_device *dev)
1480 {
1481  struct netdev_private *np = netdev_priv(dev);
1482  void __iomem *ioaddr = np->base_addr;
1483 
1484  netif_stop_queue(dev);
1485 
1486  if (debug > 1) {
1487  netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1488  ioread32(ioaddr + IntrStatus),
1489  ioread32(ioaddr + NetworkConfig));
1490  netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1491  np->cur_tx, np->dirty_tx,
1492  np->cur_rx, np->dirty_rx);
1493  }
1494 
1495  /* Stop the chip's Tx and Rx processes. */
1496  spin_lock_irq(&np->lock);
1497  netif_device_detach(dev);
1498  update_csr6(dev, 0);
1499  iowrite32(0x0000, ioaddr + IntrEnable);
1500  spin_unlock_irq(&np->lock);
1501 
1502  free_irq(np->pci_dev->irq, dev);
1503  wmb();
1504  netif_device_attach(dev);
1505 
1506  if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1507  np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1508 
1509 #ifdef __i386__
1510  if (debug > 2) {
1511  int i;
1512 
1513  printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1514  for (i = 0; i < TX_RING_SIZE; i++)
1515  printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1516  i, np->tx_ring[i].length,
1517  np->tx_ring[i].status, np->tx_ring[i].buffer1);
1518  printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1519  for (i = 0; i < RX_RING_SIZE; i++) {
1520  printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1521  i, np->rx_ring[i].length,
1522  np->rx_ring[i].status, np->rx_ring[i].buffer1);
1523  }
1524  }
1525 #endif /* __i386__ debugging only */
1526 
1527  del_timer_sync(&np->timer);
1528 
1529  free_rxtx_rings(np);
1530  free_ringdesc(np);
1531 
1532  return 0;
1533 }
1534 
1535 static void __devexit w840_remove1 (struct pci_dev *pdev)
1536 {
1537  struct net_device *dev = pci_get_drvdata(pdev);
1538 
1539  if (dev) {
1540  struct netdev_private *np = netdev_priv(dev);
1541  unregister_netdev(dev);
1542  pci_release_regions(pdev);
1543  pci_iounmap(pdev, np->base_addr);
1544  free_netdev(dev);
1545  }
1546 
1547  pci_set_drvdata(pdev, NULL);
1548 }
1549 
1550 #ifdef CONFIG_PM
1551 
1552 /*
1553  * suspend/resume synchronization:
1554  * - open, close, do_ioctl:
1555  * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1556  * - get_stats:
1557  * spin_lock_irq(np->lock), doesn't touch hw if not present
1558  * - start_xmit:
1559  * synchronize_irq + netif_tx_disable;
1560  * - tx_timeout:
1561  * netif_device_detach + netif_tx_disable;
1562  * - set_multicast_list
1563  * netif_device_detach + netif_tx_disable;
1564  * - interrupt handler
1565  * doesn't touch hw if not present, synchronize_irq waits for
1566  * running instances of the interrupt handler.
1567  *
1568  * Disabling hw requires clearing csr6 & IntrEnable.
1569  * update_csr6 and all functions that write IntrEnable check
1570  * netif_device_present before setting any bits.
1571  *
1572  * Detach must occur while holding the lock with interrupts disabled
1573  * (spin_lock_irq()); interrupts from a detached device would cause an irq storm.
1574  */
1575 static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1576 {
1577  struct net_device *dev = pci_get_drvdata (pdev);
1578  struct netdev_private *np = netdev_priv(dev);
1579  void __iomem *ioaddr = np->base_addr;
1580 
1581  rtnl_lock();
1582  if (netif_running (dev)) {
1583  del_timer_sync(&np->timer);
1584 
1585  spin_lock_irq(&np->lock);
1586  netif_device_detach(dev);
1587  update_csr6(dev, 0);
1588  iowrite32(0, ioaddr + IntrEnable);
1589  spin_unlock_irq(&np->lock);
1590 
1591  synchronize_irq(np->pci_dev->irq);
1592  netif_tx_disable(dev);
1593 
1594  np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1595 
1596  /* no more hardware accesses behind this line. */
1597 
1598  BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1599 
1600  /* pci_power_off(pdev, -1); */
1601 
1602  free_rxtx_rings(np);
1603  } else {
1604  netif_device_detach(dev);
1605  }
1606  rtnl_unlock();
1607  return 0;
1608 }
1609 
1610 static int w840_resume (struct pci_dev *pdev)
1611 {
1612  struct net_device *dev = pci_get_drvdata (pdev);
1613  struct netdev_private *np = netdev_priv(dev);
1614  int retval = 0;
1615 
1616  rtnl_lock();
1617  if (netif_device_present(dev))
1618  goto out; /* device not suspended */
1619  if (netif_running(dev)) {
1620  if ((retval = pci_enable_device(pdev))) {
1621  dev_err(&dev->dev,
1622  "pci_enable_device failed in resume\n");
1623  goto out;
1624  }
1625  spin_lock_irq(&np->lock);
1626  iowrite32(1, np->base_addr+PCIBusCfg);
1627  ioread32(np->base_addr+PCIBusCfg);
1628  udelay(1);
1629  netif_device_attach(dev);
1630  init_rxtx_rings(dev);
1631  init_registers(dev);
1632  spin_unlock_irq(&np->lock);
1633 
1634  netif_wake_queue(dev);
1635 
1636  mod_timer(&np->timer, jiffies + 1*HZ);
1637  } else {
1638  netif_device_attach(dev);
1639  }
1640 out:
1641  rtnl_unlock();
1642  return retval;
1643 }
1644 #endif
1645 
1646 static struct pci_driver w840_driver = {
1647  .name = DRV_NAME,
1648  .id_table = w840_pci_tbl,
1649  .probe = w840_probe1,
1650  .remove = __devexit_p(w840_remove1),
1651 #ifdef CONFIG_PM
1652  .suspend = w840_suspend,
1653  .resume = w840_resume,
1654 #endif
1655 };
1656 
1657 static int __init w840_init(void)
1658 {
1659  printk(version);
1660  return pci_register_driver(&w840_driver);
1661 }
1662 
1663 static void __exit w840_exit(void)
1664 {
1665  pci_unregister_driver(&w840_driver);
1666 }
1667 
1668 module_init(w840_init);
1669 module_exit(w840_exit);