Linux Kernel  3.7.1
via-rhine.c
1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
2 /*
3  Written 1998-2001 by Donald Becker.
4 
5  Current Maintainer: Roger Luethi <[email protected]>
6 
7  This software may be used and distributed according to the terms of
8  the GNU General Public License (GPL), incorporated herein by reference.
9  Drivers based on or derived from this code fall under the GPL and must
10  retain the authorship, copyright and license notice. This file is not
11  a complete program and may only be used when the entire operating
12  system is licensed under the GPL.
13 
14  This driver is designed for the VIA VT86C100A Rhine-I.
15  It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16  and management NIC 6105M).
17 
18  The author may be reached as [email protected], or C/O
19  Scyld Computing Corporation
20  410 Severn Ave., Suite 210
21  Annapolis MD 21403
22 
23 
24  This driver contains some changes from the original Donald Becker
25  version. He may or may not be interested in bug reports on this
26  code. You can find his versions at:
27  http://www.scyld.com/network/via-rhine.html
28  [link no longer provides useful info -jgarzik]
29 
30 */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.0"
36 #define DRV_RELDATE "2010-10-09"
37 
38 #include <linux/types.h>
39 
40 /* A few user-configurable values.
41  These may be modified when a driver module is loaded. */
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44  (0x0000)
45 
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47  Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49  defined(CONFIG_SPARC) || defined(__ia64__) || \
50  defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55 
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57  power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
58 static bool avoid_D3;
59 
60 /*
61  * In case you are looking for 'options[]' or 'full_duplex[]', they
62  * are gone. Use ethtool(8) instead.
63  */
64 
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66  The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
68 
69 
70 /* Operational parameters that are set at compile time. */
71 
72 /* Keep the ring sizes a power of two for compile efficiency.
73  The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74  Making the Tx ring too large decreases the effectiveness of channel
75  bonding and packet priority.
76  There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
79 #define RX_RING_SIZE 64
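/*
 * Editorial note (not from the original source): because the ring sizes above
 * are powers of two, the index arithmetic used throughout this driver, e.g.
 *
 *	entry = rp->cur_tx % TX_RING_SIZE;
 *
 * compiles down to a simple bit mask, i.e. it is equivalent to
 *
 *	entry = rp->cur_tx & (TX_RING_SIZE - 1);
 */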
80 
81 /* Operational parameters that usually are not changed. */
82 
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT (2*HZ)
85 
86 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
87 
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h> /* Processor type for cache alignment. */
110 #include <asm/io.h>
111 #include <asm/irq.h>
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
114 
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] __devinitconst =
117  "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 
119 /* This driver was written to use PCI memory space. Some early versions
120  of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
122 #define USE_MMIO
123 #else
124 #endif
125 
126 MODULE_AUTHOR("Donald Becker <[email protected]>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
129 
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
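/*
 * Usage sketch (editorial, not from the original source): the parameters
 * above are set at module load time, e.g.
 *
 *	modprobe via-rhine rx_copybreak=1518 avoid_D3=1
 *
 * When the driver is built in, the same knobs are available on the kernel
 * command line, e.g. via-rhine.avoid_D3=1 (see the bootparam(7) note above).
 */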
136 
137 #define MCAM_SIZE 32
138 #define VCAM_SIZE 32
139 
140 /*
141  Theory of Operation
142 
143 I. Board Compatibility
144 
145 This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
146 controller.
147 
148 II. Board-specific settings
149 
150 Boards with this chip are functional only in a bus-master PCI slot.
151 
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are
154 correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
157 
158 III. Driver operation
159 
160 IIIa. Ring buffers
161 
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
165 
166 IIIb/c. Transmit/Receive Structure
167 
168 This driver attempts to use a zero-copy receive and transmit scheme.
169 
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
172 
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
180 
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
189 
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
194 
195 IIId. Synchronization
196 
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
201 
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
206 
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
210 the Rx ring are available the transmit queue is woken up if it was stopped.
211 
212 IV. Notes
213 
214 IVb. References
215 
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
221 
222 
223 IVc. Errata
224 
225 The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
230 
231 */
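/*
 * Illustrative sketch (editorial, never compiled): the copy-vs-pass-up
 * decision described in IIIb/c above, reduced to plain C. The names here are
 * hypothetical; the real logic lives in rhine_rx() below.
 */
#if 0
#include <string.h>

/* Returns 1 if the frame was copied into 'small_buf' (the ring buffer is
 * kept and re-armed), 0 if the full-sized ring buffer itself should be
 * passed up the stack and replaced by a fresh allocation afterwards. */
static int copybreak_decision(const unsigned char *ring_buf, int pkt_len,
			      int rx_copybreak, unsigned char *small_buf)
{
	if (pkt_len < rx_copybreak) {
		memcpy(small_buf, ring_buf, pkt_len);	/* small frame: copy */
		return 1;
	}
	return 0;					/* large frame: flip */
}
#endif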
232 
233 
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235  of the drivers, and will likely be provided by some future kernel.
236  Note the matching code -- the first table entry matches all 56** cards but
237  the second only the 1234 card.
238 */
239 
240 enum rhine_revs {
241  VT86C100A = 0x00,
242  VTunknown0 = 0x20,
243  VT6102 = 0x40,
244  VT8231 = 0x50, /* Integrated MAC */
245  VT8233 = 0x60, /* Integrated MAC */
246  VT8235 = 0x74, /* Integrated MAC */
247  VT8237 = 0x78, /* Integrated MAC */
248  VTunknown1 = 0x7C,
249  VT6105 = 0x80,
250  VT6105_B0 = 0x83,
251  VT6105L = 0x8A,
252  VT6107 = 0x8C,
253  VTunknown2 = 0x8E,
254  VT6105M = 0x90, /* Management adapter */
255 };
256 
257 enum rhine_quirks {
258  rqWOL = 0x0001, /* Wake-On-LAN support */
259  rqForceReset = 0x0002,
260  rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261  rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262  rqRhineI = 0x0100, /* See comment below */
263 };
264 /*
265  * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266  * MMIO as well as for the collision counter and the Tx FIFO underflow
267  * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
268  */
269 
270 /* Beware of PCI posted writes */
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
272 
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274  { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275  { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276  { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277  { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
278  { } /* terminate list */
279 };
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
281 
282 
283 /* Offsets to the device registers. */
284 enum register_offsets {
285  StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286  ChipCmd1=0x09, TQWake=0x0A,
287  IntrStatus=0x0C, IntrEnable=0x0E,
288  MulticastFilter0=0x10, MulticastFilter1=0x14,
289  RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290  MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291  MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292  ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293  RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294  StickyHW=0x83, IntrStatus2=0x84,
295  CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296  WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297  WOLcrClr1=0xA6, WOLcgClr=0xA7,
298  PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
299 };
300 
301 /* Bits in ConfigD */
305 };
306 
307 /* Bits in the TxConfig (TCR) register */
308 enum tcr_bits {
309  TCR_PQEN=0x01,
310  TCR_LB0=0x02, /* loopback[0] */
311  TCR_LB1=0x04, /* loopback[1] */
312  TCR_OFSET=0x08,
314  TCR_RTFT0=0x20,
315  TCR_RTFT1=0x40,
316  TCR_RTSF=0x80,
317 };
318 
319 /* Bits in the CamCon (CAMC) register */
320 enum camcon_bits {
321  CAMC_CAMEN=0x01,
322  CAMC_VCAMSL=0x02,
323  CAMC_CAMWR=0x04,
324  CAMC_CAMRD=0x08,
325 };
326 
327 /* Bits in the PCIBusConfig1 (BCR1) register */
328 enum bcr1_bits {
329  BCR1_POT0=0x01,
330  BCR1_POT1=0x02,
331  BCR1_POT2=0x04,
334  BCR1_CTSF=0x20,
335  BCR1_TXQNOBK=0x40, /* for VT6105 */
336  BCR1_VIDFR=0x80, /* for VT6105 */
337  BCR1_MED0=0x40, /* for VT6102 */
338  BCR1_MED1=0x80, /* for VT6102 */
339 };
340 
341 #ifdef USE_MMIO
342 /* Registers we check that mmio and reg are the same. */
343 static const int mmio_verify_registers[] = {
344  RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
345  0
346 };
347 #endif
348 
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
351  IntrRxDone = 0x0001,
352  IntrTxDone = 0x0002,
353  IntrRxErr = 0x0004,
354  IntrTxError = 0x0008,
355  IntrRxEmpty = 0x0020,
356  IntrPCIErr = 0x0040,
357  IntrStatsMax = 0x0080,
358  IntrRxEarly = 0x0100,
359  IntrTxUnderrun = 0x0210,
360  IntrRxOverflow = 0x0400,
361  IntrRxDropped = 0x0800,
362  IntrRxNoBuf = 0x1000,
363  IntrTxAborted = 0x2000,
364  IntrLinkChange = 0x4000,
365  IntrRxWakeUp = 0x8000,
366  IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367  IntrNormalSummary = IntrRxDone | IntrTxDone,
368  IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
369  IntrTxUnderrun,
370 };
371 
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
373 enum wol_bits {
374  WOLucast = 0x10,
375  WOLmagic = 0x20,
376  WOLbmcast = 0x30,
377  WOLlnkon = 0x40,
378  WOLlnkoff = 0x80,
379 };
380 
381 /* The Rx and Tx buffer descriptors. */
382 struct rx_desc {
383  __le32 rx_status;
384  __le32 desc_length; /* Chain flag, Buffer/frame length */
385  __le32 addr;
386  __le32 next_desc;
387 };
388 struct tx_desc {
389  __le32 tx_status;
390  __le32 desc_length; /* Chain flag, Tx Config, Frame length */
391  __le32 addr;
392  __le32 next_desc;
393 };
394 
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC 0x00e08000
397 
398 enum rx_status_bits {
399  RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
400 };
401 
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
404  DescOwn=0x80000000
405 };
406 
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
409  DescTag=0x00010000
410 };
411 
412 /* Bits in ChipCmd. */
413 enum chip_cmd_bits {
414  CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415  CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416  Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417  Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
418 };
419 
420 struct rhine_private {
421  /* Bit mask for configured VLAN ids */
422  unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
423 
424  /* Descriptor rings */
425  struct rx_desc *rx_ring;
426  struct tx_desc *tx_ring;
427  dma_addr_t rx_ring_dma;
428  dma_addr_t tx_ring_dma;
429 
430  /* The addresses of receive-in-place skbuffs. */
431  struct sk_buff *rx_skbuff[RX_RING_SIZE];
432  dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
433 
434  /* The saved address of a sent-in-place packet/buffer, for later free(). */
435  struct sk_buff *tx_skbuff[TX_RING_SIZE];
436  dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
437 
438  /* Tx bounce buffers (Rhine-I only) */
439  unsigned char *tx_buf[TX_RING_SIZE];
440  unsigned char *tx_bufs;
441  dma_addr_t tx_bufs_dma;
442 
443  struct pci_dev *pdev;
444  long pioaddr;
445  struct net_device *dev;
446  struct napi_struct napi;
447  spinlock_t lock;
448  struct mutex task_lock;
449  bool task_enable;
450  struct work_struct slow_event_task;
451  struct work_struct reset_task;
452 
453  u32 msg_enable;
454 
455  /* Frequently used values: keep some adjacent for cache effect. */
456  u32 quirks;
457  struct rx_desc *rx_head_desc;
458  unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
459  unsigned int cur_tx, dirty_tx;
460  unsigned int rx_buf_sz; /* Based on MTU+slack. */
461  u8 wolopts;
462 
463  u8 tx_thresh, rx_thresh;
464 
465  struct mii_if_info mii_if;
466  void __iomem *base;
467 };
468 
469 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
470 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
471 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
472 
473 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
474 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
475 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
476 
477 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
478 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
480 
481 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
482 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
483 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
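/*
 * Usage example (taken from rhine_kick_tx_threshold() below): raise the Tx
 * FIFO threshold while leaving the other TxConfig bits untouched:
 *
 *	BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
 *
 * i.e. read the register, clear the bits in the mask (0x80), OR in the new
 * value and write the result back.
 */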
484 
485 
486 static int mdio_read(struct net_device *dev, int phy_id, int location);
487 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488 static int rhine_open(struct net_device *dev);
489 static void rhine_reset_task(struct work_struct *work);
490 static void rhine_slow_event_task(struct work_struct *work);
491 static void rhine_tx_timeout(struct net_device *dev);
492 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
493  struct net_device *dev);
494 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
495 static void rhine_tx(struct net_device *dev);
496 static int rhine_rx(struct net_device *dev, int limit);
497 static void rhine_set_rx_mode(struct net_device *dev);
498 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
499 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500 static const struct ethtool_ops netdev_ethtool_ops;
501 static int rhine_close(struct net_device *dev);
502 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
503 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
504 static void rhine_restart_tx(struct net_device *dev);
505 
506 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
507 {
508  void __iomem *ioaddr = rp->base;
509  int i;
510 
511  for (i = 0; i < 1024; i++) {
512  bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
513 
514  if (low ^ has_mask_bits)
515  break;
516  udelay(10);
517  }
518  if (i > 64) {
519  netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
520  "count: %04d\n", low ? "low" : "high", reg, mask, i);
521  }
522 }
523 
524 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
525 {
526  rhine_wait_bit(rp, reg, mask, false);
527 }
528 
529 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
530 {
531  rhine_wait_bit(rp, reg, mask, true);
532 }
533 
534 static u32 rhine_get_events(struct rhine_private *rp)
535 {
536  void __iomem *ioaddr = rp->base;
537  u32 intr_status;
538 
539  intr_status = ioread16(ioaddr + IntrStatus);
540  /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
541  if (rp->quirks & rqStatusWBRace)
542  intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
543  return intr_status;
544 }
545 
546 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
547 {
548  void __iomem *ioaddr = rp->base;
549 
550  if (rp->quirks & rqStatusWBRace)
551  iowrite8(mask >> 16, ioaddr + IntrStatus2);
552  iowrite16(mask, ioaddr + IntrStatus);
553  mmiowb();
554 }
555 
556 /*
557  * Get power related registers into sane state.
558  * Notify user about past WOL event.
559  */
560 static void rhine_power_init(struct net_device *dev)
561 {
562  struct rhine_private *rp = netdev_priv(dev);
563  void __iomem *ioaddr = rp->base;
564  u16 wolstat;
565 
566  if (rp->quirks & rqWOL) {
567  /* Make sure chip is in power state D0 */
568  iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
569 
570  /* Disable "force PME-enable" */
571  iowrite8(0x80, ioaddr + WOLcgClr);
572 
573  /* Clear power-event config bits (WOL) */
574  iowrite8(0xFF, ioaddr + WOLcrClr);
575  /* More recent cards can manage two additional patterns */
576  if (rp->quirks & rq6patterns)
577  iowrite8(0x03, ioaddr + WOLcrClr1);
578 
579  /* Save power-event status bits */
580  wolstat = ioread8(ioaddr + PwrcsrSet);
581  if (rp->quirks & rq6patterns)
582  wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
583 
584  /* Clear power-event status bits */
585  iowrite8(0xFF, ioaddr + PwrcsrClr);
586  if (rp->quirks & rq6patterns)
587  iowrite8(0x03, ioaddr + PwrcsrClr1);
588 
589  if (wolstat) {
590  char *reason;
591  switch (wolstat) {
592  case WOLmagic:
593  reason = "Magic packet";
594  break;
595  case WOLlnkon:
596  reason = "Link went up";
597  break;
598  case WOLlnkoff:
599  reason = "Link went down";
600  break;
601  case WOLucast:
602  reason = "Unicast packet";
603  break;
604  case WOLbmcast:
605  reason = "Multicast/broadcast packet";
606  break;
607  default:
608  reason = "Unknown";
609  }
610  netdev_info(dev, "Woke system up. Reason: %s\n",
611  reason);
612  }
613  }
614 }
615 
616 static void rhine_chip_reset(struct net_device *dev)
617 {
618  struct rhine_private *rp = netdev_priv(dev);
619  void __iomem *ioaddr = rp->base;
620  u8 cmd1;
621 
622  iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
623  IOSYNC;
624 
625  if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
626  netdev_info(dev, "Reset not complete yet. Trying harder.\n");
627 
628  /* Force reset */
629  if (rp->quirks & rqForceReset)
630  iowrite8(0x40, ioaddr + MiscCmd);
631 
632  /* Reset can take somewhat longer (rare) */
633  rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
634  }
635 
636  cmd1 = ioread8(ioaddr + ChipCmd1);
637  netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
638  "failed" : "succeeded");
639 }
640 
641 #ifdef USE_MMIO
642 static void enable_mmio(long pioaddr, u32 quirks)
643 {
644  int n;
645  if (quirks & rqRhineI) {
646  /* More recent docs say that this bit is reserved ... */
647  n = inb(pioaddr + ConfigA) | 0x20;
648  outb(n, pioaddr + ConfigA);
649  } else {
650  n = inb(pioaddr + ConfigD) | 0x80;
651  outb(n, pioaddr + ConfigD);
652  }
653 }
654 #endif
655 
656 /*
657  * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
658  * (plus 0x6C for Rhine-I/II)
659  */
660 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
661 {
662  struct rhine_private *rp = netdev_priv(dev);
663  void __iomem *ioaddr = rp->base;
664  int i;
665 
666  outb(0x20, pioaddr + MACRegEEcsr);
667  for (i = 0; i < 1024; i++) {
668  if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
669  break;
670  }
671  if (i > 512)
672  pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
673 
674 #ifdef USE_MMIO
675  /*
676  * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
677  * MMIO. If reloading EEPROM was done first this could be avoided, but
678  * it is not known if that still works with the "win98-reboot" problem.
679  */
680  enable_mmio(pioaddr, rp->quirks);
681 #endif
682 
683  /* Turn off EEPROM-controlled wake-up (magic packet) */
684  if (rp->quirks & rqWOL)
685  iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
686 
687 }
688 
689 #ifdef CONFIG_NET_POLL_CONTROLLER
690 static void rhine_poll(struct net_device *dev)
691 {
692  struct rhine_private *rp = netdev_priv(dev);
693  const int irq = rp->pdev->irq;
694 
695  disable_irq(irq);
696  rhine_interrupt(irq, dev);
697  enable_irq(irq);
698 }
699 #endif
700 
701 static void rhine_kick_tx_threshold(struct rhine_private *rp)
702 {
703  if (rp->tx_thresh < 0xe0) {
704  void __iomem *ioaddr = rp->base;
705 
706  rp->tx_thresh += 0x20;
707  BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
708  }
709 }
710 
711 static void rhine_tx_err(struct rhine_private *rp, u32 status)
712 {
713  struct net_device *dev = rp->dev;
714 
715  if (status & IntrTxAborted) {
716  netif_info(rp, tx_err, dev,
717  "Abort %08x, frame dropped\n", status);
718  }
719 
720  if (status & IntrTxUnderrun) {
721  rhine_kick_tx_threshold(rp);
722  netif_info(rp, tx_err, dev, "Transmitter underrun, "
723  "Tx threshold now %02x\n", rp->tx_thresh);
724  }
725 
726  if (status & IntrTxDescRace)
727  netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
728 
729  if ((status & IntrTxError) &&
730  (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
731  rhine_kick_tx_threshold(rp);
732  netif_info(rp, tx_err, dev, "Unspecified error. "
733  "Tx threshold now %02x\n", rp->tx_thresh);
734  }
735 
736  rhine_restart_tx(dev);
737 }
738 
739 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
740 {
741  void __iomem *ioaddr = rp->base;
742  struct net_device_stats *stats = &rp->dev->stats;
743 
744  stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
745  stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
746 
747  /*
748  * Clears the "tally counters" for CRC errors and missed frames(?).
749  * It has been reported that some chips need a write of 0 to clear
750  * these, for others the counters are set to 1 when written to and
751  * instead cleared when read. So we clear them both ways ...
752  */
753  iowrite32(0, ioaddr + RxMissed);
754  ioread16(ioaddr + RxCRCErrs);
755  ioread16(ioaddr + RxMissed);
756 }
757 
758 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
759  IntrRxErr | \
760  IntrRxEmpty | \
761  IntrRxOverflow | \
762  IntrRxDropped | \
763  IntrRxNoBuf | \
764  IntrRxWakeUp)
765 
766 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
767  IntrTxAborted | \
768  IntrTxUnderrun | \
769  IntrTxDescRace)
770 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
771 
772 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
773  RHINE_EVENT_NAPI_TX | \
774  IntrStatsMax)
775 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
776 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
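/*
 * Editorial note: the masks above partition the interrupt causes. Everything
 * in RHINE_EVENT_NAPI is acknowledged and handled from rhine_napipoll();
 * the RHINE_EVENT_SLOW causes (PCI error, link change) are left pending and
 * handed off to rhine_slow_event_task() in process context.
 */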
777 
778 static int rhine_napipoll(struct napi_struct *napi, int budget)
779 {
780  struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
781  struct net_device *dev = rp->dev;
782  void __iomem *ioaddr = rp->base;
783  u16 enable_mask = RHINE_EVENT & 0xffff;
784  int work_done = 0;
785  u32 status;
786 
787  status = rhine_get_events(rp);
788  rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
789 
790  if (status & RHINE_EVENT_NAPI_RX)
791  work_done += rhine_rx(dev, budget);
792 
793  if (status & RHINE_EVENT_NAPI_TX) {
794  if (status & RHINE_EVENT_NAPI_TX_ERR) {
795  /* Avoid scavenging before Tx engine turned off */
796  rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
797  if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
798  netif_warn(rp, tx_err, dev, "Tx still on\n");
799  }
800 
801  rhine_tx(dev);
802 
803  if (status & RHINE_EVENT_NAPI_TX_ERR)
804  rhine_tx_err(rp, status);
805  }
806 
807  if (status & IntrStatsMax) {
808  spin_lock(&rp->lock);
809  rhine_update_rx_crc_and_missed_errord(rp);
810  spin_unlock(&rp->lock);
811  }
812 
813  if (status & RHINE_EVENT_SLOW) {
814  enable_mask &= ~RHINE_EVENT_SLOW;
815  schedule_work(&rp->slow_event_task);
816  }
817 
818  if (work_done < budget) {
819  napi_complete(napi);
820  iowrite16(enable_mask, ioaddr + IntrEnable);
821  mmiowb();
822  }
823  return work_done;
824 }
825 
826 static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
827 {
828  struct rhine_private *rp = netdev_priv(dev);
829 
830  /* Reset the chip to erase previous misconfiguration. */
831  rhine_chip_reset(dev);
832 
833  /* Rhine-I needs extra time to recuperate before EEPROM reload */
834  if (rp->quirks & rqRhineI)
835  msleep(5);
836 
837  /* Reload EEPROM controlled bytes cleared by soft reset */
838  rhine_reload_eeprom(pioaddr, dev);
839 }
840 
841 static const struct net_device_ops rhine_netdev_ops = {
842  .ndo_open = rhine_open,
843  .ndo_stop = rhine_close,
844  .ndo_start_xmit = rhine_start_tx,
845  .ndo_get_stats = rhine_get_stats,
846  .ndo_set_rx_mode = rhine_set_rx_mode,
847  .ndo_change_mtu = eth_change_mtu,
848  .ndo_validate_addr = eth_validate_addr,
849  .ndo_set_mac_address = eth_mac_addr,
850  .ndo_do_ioctl = netdev_ioctl,
851  .ndo_tx_timeout = rhine_tx_timeout,
852  .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
853  .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
854 #ifdef CONFIG_NET_POLL_CONTROLLER
855  .ndo_poll_controller = rhine_poll,
856 #endif
857 };
858 
859 static int __devinit rhine_init_one(struct pci_dev *pdev,
860  const struct pci_device_id *ent)
861 {
862  struct net_device *dev;
863  struct rhine_private *rp;
864  int i, rc;
865  u32 quirks;
866  long pioaddr;
867  long memaddr;
868  void __iomem *ioaddr;
869  int io_size, phy_id;
870  const char *name;
871 #ifdef USE_MMIO
872  int bar = 1;
873 #else
874  int bar = 0;
875 #endif
876 
877 /* when built into the kernel, we only print version if device is found */
878 #ifndef MODULE
879  pr_info_once("%s\n", version);
880 #endif
881 
882  io_size = 256;
883  phy_id = 0;
884  quirks = 0;
885  name = "Rhine";
886  if (pdev->revision < VTunknown0) {
887  quirks = rqRhineI;
888  io_size = 128;
889  }
890  else if (pdev->revision >= VT6102) {
891  quirks = rqWOL | rqForceReset;
892  if (pdev->revision < VT6105) {
893  name = "Rhine II";
894  quirks |= rqStatusWBRace; /* Rhine-II exclusive */
895  }
896  else {
897  phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
898  if (pdev->revision >= VT6105_B0)
899  quirks |= rq6patterns;
900  if (pdev->revision < VT6105M)
901  name = "Rhine III";
902  else
903  name = "Rhine III (Management Adapter)";
904  }
905  }
906 
907  rc = pci_enable_device(pdev);
908  if (rc)
909  goto err_out;
910 
911  /* this should always be supported */
912  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
913  if (rc) {
914  dev_err(&pdev->dev,
915  "32-bit PCI DMA addresses not supported by the card!?\n");
916  goto err_out;
917  }
918 
919  /* sanity check */
920  if ((pci_resource_len(pdev, 0) < io_size) ||
921  (pci_resource_len(pdev, 1) < io_size)) {
922  rc = -EIO;
923  dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
924  goto err_out;
925  }
926 
927  pioaddr = pci_resource_start(pdev, 0);
928  memaddr = pci_resource_start(pdev, 1);
929 
930  pci_set_master(pdev);
931 
932  dev = alloc_etherdev(sizeof(struct rhine_private));
933  if (!dev) {
934  rc = -ENOMEM;
935  goto err_out;
936  }
937  SET_NETDEV_DEV(dev, &pdev->dev);
938 
939  rp = netdev_priv(dev);
940  rp->dev = dev;
941  rp->quirks = quirks;
942  rp->pioaddr = pioaddr;
943  rp->pdev = pdev;
944  rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
945 
946  rc = pci_request_regions(pdev, DRV_NAME);
947  if (rc)
948  goto err_out_free_netdev;
949 
950  ioaddr = pci_iomap(pdev, bar, io_size);
951  if (!ioaddr) {
952  rc = -EIO;
953  dev_err(&pdev->dev,
954  "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
955  pci_name(pdev), io_size, memaddr);
956  goto err_out_free_res;
957  }
958 
959 #ifdef USE_MMIO
960  enable_mmio(pioaddr, quirks);
961 
962  /* Check that selected MMIO registers match the PIO ones */
963  i = 0;
964  while (mmio_verify_registers[i]) {
965  int reg = mmio_verify_registers[i++];
966  unsigned char a = inb(pioaddr+reg);
967  unsigned char b = readb(ioaddr+reg);
968  if (a != b) {
969  rc = -EIO;
970  dev_err(&pdev->dev,
971  "MMIO do not match PIO [%02x] (%02x != %02x)\n",
972  reg, a, b);
973  goto err_out_unmap;
974  }
975  }
976 #endif /* USE_MMIO */
977 
978  rp->base = ioaddr;
979 
980  /* Get chip registers into a sane state */
981  rhine_power_init(dev);
982  rhine_hw_init(dev, pioaddr);
983 
984  for (i = 0; i < 6; i++)
985  dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
986 
987  if (!is_valid_ether_addr(dev->dev_addr)) {
988  /* Report it and use a random ethernet address instead */
989  netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
990  eth_hw_addr_random(dev);
991  netdev_info(dev, "Using random MAC address: %pM\n",
992  dev->dev_addr);
993  }
994  memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
995 
996  /* For Rhine-I/II, phy_id is loaded from EEPROM */
997  if (!phy_id)
998  phy_id = ioread8(ioaddr + 0x6C);
999 
1000  spin_lock_init(&rp->lock);
1001  mutex_init(&rp->task_lock);
1002  INIT_WORK(&rp->reset_task, rhine_reset_task);
1003  INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1004 
1005  rp->mii_if.dev = dev;
1006  rp->mii_if.mdio_read = mdio_read;
1007  rp->mii_if.mdio_write = mdio_write;
1008  rp->mii_if.phy_id_mask = 0x1f;
1009  rp->mii_if.reg_num_mask = 0x1f;
1010 
1011  /* The chip-specific entries in the device structure. */
1012  dev->netdev_ops = &rhine_netdev_ops;
1013  dev->ethtool_ops = &netdev_ethtool_ops;
1014  dev->watchdog_timeo = TX_TIMEOUT;
1015 
1016  netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1017 
1018  if (rp->quirks & rqRhineI)
1019  dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1020 
1021  if (pdev->revision >= VT6105M)
1022  dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1023  NETIF_F_HW_VLAN_FILTER;
1024 
1025  /* dev->name not defined before register_netdev()! */
1026  rc = register_netdev(dev);
1027  if (rc)
1028  goto err_out_unmap;
1029 
1030  netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1031  name,
1032 #ifdef USE_MMIO
1033  memaddr,
1034 #else
1035  (long)ioaddr,
1036 #endif
1037  dev->dev_addr, pdev->irq);
1038 
1039  pci_set_drvdata(pdev, dev);
1040 
1041  {
1042  u16 mii_cmd;
1043  int mii_status = mdio_read(dev, phy_id, 1);
1044  mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1045  mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1046  if (mii_status != 0xffff && mii_status != 0x0000) {
1047  rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1048  netdev_info(dev,
1049  "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1050  phy_id,
1051  mii_status, rp->mii_if.advertising,
1052  mdio_read(dev, phy_id, 5));
1053 
1054  /* set IFF_RUNNING */
1055  if (mii_status & BMSR_LSTATUS)
1056  netif_carrier_on(dev);
1057  else
1058  netif_carrier_off(dev);
1059 
1060  }
1061  }
1062  rp->mii_if.phy_id = phy_id;
1063  if (avoid_D3)
1064  netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1065 
1066  return 0;
1067 
1068 err_out_unmap:
1069  pci_iounmap(pdev, ioaddr);
1070 err_out_free_res:
1071  pci_release_regions(pdev);
1072 err_out_free_netdev:
1073  free_netdev(dev);
1074 err_out:
1075  return rc;
1076 }
1077 
1078 static int alloc_ring(struct net_device* dev)
1079 {
1080  struct rhine_private *rp = netdev_priv(dev);
1081  void *ring;
1082  dma_addr_t ring_dma;
1083 
1084  ring = pci_alloc_consistent(rp->pdev,
1085  RX_RING_SIZE * sizeof(struct rx_desc) +
1086  TX_RING_SIZE * sizeof(struct tx_desc),
1087  &ring_dma);
1088  if (!ring) {
1089  netdev_err(dev, "Could not allocate DMA memory\n");
1090  return -ENOMEM;
1091  }
1092  if (rp->quirks & rqRhineI) {
1093  rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1094  PKT_BUF_SZ * TX_RING_SIZE,
1095  &rp->tx_bufs_dma);
1096  if (rp->tx_bufs == NULL) {
1097  pci_free_consistent(rp->pdev,
1098  RX_RING_SIZE * sizeof(struct rx_desc) +
1099  TX_RING_SIZE * sizeof(struct tx_desc),
1100  ring, ring_dma);
1101  return -ENOMEM;
1102  }
1103  }
1104 
1105  rp->rx_ring = ring;
1106  rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1107  rp->rx_ring_dma = ring_dma;
1108  rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1109 
1110  return 0;
1111 }
1112 
1113 static void free_ring(struct net_device* dev)
1114 {
1115  struct rhine_private *rp = netdev_priv(dev);
1116 
1117  pci_free_consistent(rp->pdev,
1118  RX_RING_SIZE * sizeof(struct rx_desc) +
1119  TX_RING_SIZE * sizeof(struct tx_desc),
1120  rp->rx_ring, rp->rx_ring_dma);
1121  rp->tx_ring = NULL;
1122 
1123  if (rp->tx_bufs)
1124  pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1125  rp->tx_bufs, rp->tx_bufs_dma);
1126 
1127  rp->tx_bufs = NULL;
1128 
1129 }
1130 
1131 static void alloc_rbufs(struct net_device *dev)
1132 {
1133  struct rhine_private *rp = netdev_priv(dev);
1134  dma_addr_t next;
1135  int i;
1136 
1137  rp->dirty_rx = rp->cur_rx = 0;
1138 
1139  rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1140  rp->rx_head_desc = &rp->rx_ring[0];
1141  next = rp->rx_ring_dma;
1142 
1143  /* Init the ring entries */
1144  for (i = 0; i < RX_RING_SIZE; i++) {
1145  rp->rx_ring[i].rx_status = 0;
1146  rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1147  next += sizeof(struct rx_desc);
1148  rp->rx_ring[i].next_desc = cpu_to_le32(next);
1149  rp->rx_skbuff[i] = NULL;
1150  }
1151  /* Mark the last entry as wrapping the ring. */
1152  rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1153 
1154  /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1155  for (i = 0; i < RX_RING_SIZE; i++) {
1156  struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1157  rp->rx_skbuff[i] = skb;
1158  if (skb == NULL)
1159  break;
1160 
1161  rp->rx_skbuff_dma[i] =
1162  pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1163  PCI_DMA_FROMDEVICE);
1164 
1165  rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1166  rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1167  }
1168  rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1169 }
1170 
1171 static void free_rbufs(struct net_device* dev)
1172 {
1173  struct rhine_private *rp = netdev_priv(dev);
1174  int i;
1175 
1176  /* Free all the skbuffs in the Rx queue. */
1177  for (i = 0; i < RX_RING_SIZE; i++) {
1178  rp->rx_ring[i].rx_status = 0;
1179  rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1180  if (rp->rx_skbuff[i]) {
1181  pci_unmap_single(rp->pdev,
1182  rp->rx_skbuff_dma[i],
1183  rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1184  dev_kfree_skb(rp->rx_skbuff[i]);
1185  }
1186  rp->rx_skbuff[i] = NULL;
1187  }
1188 }
1189 
1190 static void alloc_tbufs(struct net_device* dev)
1191 {
1192  struct rhine_private *rp = netdev_priv(dev);
1193  dma_addr_t next;
1194  int i;
1195 
1196  rp->dirty_tx = rp->cur_tx = 0;
1197  next = rp->tx_ring_dma;
1198  for (i = 0; i < TX_RING_SIZE; i++) {
1199  rp->tx_skbuff[i] = NULL;
1200  rp->tx_ring[i].tx_status = 0;
1201  rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1202  next += sizeof(struct tx_desc);
1203  rp->tx_ring[i].next_desc = cpu_to_le32(next);
1204  if (rp->quirks & rqRhineI)
1205  rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1206  }
1207  rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1208 
1209 }
1210 
1211 static void free_tbufs(struct net_device* dev)
1212 {
1213  struct rhine_private *rp = netdev_priv(dev);
1214  int i;
1215 
1216  for (i = 0; i < TX_RING_SIZE; i++) {
1217  rp->tx_ring[i].tx_status = 0;
1218  rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1219  rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1220  if (rp->tx_skbuff[i]) {
1221  if (rp->tx_skbuff_dma[i]) {
1222  pci_unmap_single(rp->pdev,
1223  rp->tx_skbuff_dma[i],
1224  rp->tx_skbuff[i]->len,
1225  PCI_DMA_TODEVICE);
1226  }
1227  dev_kfree_skb(rp->tx_skbuff[i]);
1228  }
1229  rp->tx_skbuff[i] = NULL;
1230  rp->tx_buf[i] = NULL;
1231  }
1232 }
1233 
1234 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1235 {
1236  struct rhine_private *rp = netdev_priv(dev);
1237  void __iomem *ioaddr = rp->base;
1238 
1239  mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1240 
1241  if (rp->mii_if.full_duplex)
1242  iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1243  ioaddr + ChipCmd1);
1244  else
1245  iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1246  ioaddr + ChipCmd1);
1247 
1248  netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1249  rp->mii_if.force_media, netif_carrier_ok(dev));
1250 }
1251 
1252 /* Called after status of force_media possibly changed */
1253 static void rhine_set_carrier(struct mii_if_info *mii)
1254 {
1255  struct net_device *dev = mii->dev;
1256  struct rhine_private *rp = netdev_priv(dev);
1257 
1258  if (mii->force_media) {
1259  /* autoneg is off: Link is always assumed to be up */
1260  if (!netif_carrier_ok(dev))
1261  netif_carrier_on(dev);
1262  } else /* Let MII library update carrier status */
1263  rhine_check_media(dev, 0);
1264 
1265  netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1266  mii->force_media, netif_carrier_ok(dev));
1267 }
1268 
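/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */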
1277 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1278 {
1279  int i;
1280 
1281  iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1282  wmb();
1283 
1284  /* Paranoid -- idx out of range should never happen */
1285  idx &= (MCAM_SIZE - 1);
1286 
1287  iowrite8((u8) idx, ioaddr + CamAddr);
1288 
1289  for (i = 0; i < 6; i++, addr++)
1290  iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1291  udelay(10);
1292  wmb();
1293 
1294  iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1295  udelay(10);
1296 
1297  iowrite8(0, ioaddr + CamCon);
1298 }
1299 
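/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */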
1308 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1309 {
1310  iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1311  wmb();
1312 
1313  /* Paranoid -- idx out of range should never happen */
1314  idx &= (VCAM_SIZE - 1);
1315 
1316  iowrite8((u8) idx, ioaddr + CamAddr);
1317 
1318  iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1319  udelay(10);
1320  wmb();
1321 
1322  iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1323  udelay(10);
1324 
1325  iowrite8(0, ioaddr + CamCon);
1326 }
1327 
1335 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1336 {
1337  iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1338  wmb();
1339 
1340  /* write mask */
1341  iowrite32(mask, ioaddr + CamMask);
1342 
1343  /* disable CAMEN */
1344  iowrite8(0, ioaddr + CamCon);
1345 }
1346 
1354 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1355 {
1356  iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1357  wmb();
1358 
1359  /* write mask */
1360  iowrite32(mask, ioaddr + CamMask);
1361 
1362  /* disable CAMEN */
1363  iowrite8(0, ioaddr + CamCon);
1364 }
1365 
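/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this Rhine.
 */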
1373 static void rhine_init_cam_filter(struct net_device *dev)
1374 {
1375  struct rhine_private *rp = netdev_priv(dev);
1376  void __iomem *ioaddr = rp->base;
1377 
1378  /* Disable all CAMs */
1379  rhine_set_vlan_cam_mask(ioaddr, 0);
1380  rhine_set_cam_mask(ioaddr, 0);
1381 
1382  /* disable hardware VLAN support */
1383  BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1384  BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1385 }
1386 
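/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */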
1393 static void rhine_update_vcam(struct net_device *dev)
1394 {
1395  struct rhine_private *rp = netdev_priv(dev);
1396  void __iomem *ioaddr = rp->base;
1397  u16 vid;
1398  u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1399  unsigned int i = 0;
1400 
1401  for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1402  rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1403  vCAMmask |= 1 << i;
1404  if (++i >= VCAM_SIZE)
1405  break;
1406  }
1407  rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1408 }
1409 
1410 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1411 {
1412  struct rhine_private *rp = netdev_priv(dev);
1413 
1414  spin_lock_bh(&rp->lock);
1415  set_bit(vid, rp->active_vlans);
1416  rhine_update_vcam(dev);
1417  spin_unlock_bh(&rp->lock);
1418  return 0;
1419 }
1420 
1421 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1422 {
1423  struct rhine_private *rp = netdev_priv(dev);
1424 
1425  spin_lock_bh(&rp->lock);
1426  clear_bit(vid, rp->active_vlans);
1427  rhine_update_vcam(dev);
1428  spin_unlock_bh(&rp->lock);
1429  return 0;
1430 }
1431 
1432 static void init_registers(struct net_device *dev)
1433 {
1434  struct rhine_private *rp = netdev_priv(dev);
1435  void __iomem *ioaddr = rp->base;
1436  int i;
1437 
1438  for (i = 0; i < 6; i++)
1439  iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1440 
1441  /* Initialize other registers. */
1442  iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1443  /* Configure initial FIFO thresholds. */
1444  iowrite8(0x20, ioaddr + TxConfig);
1445  rp->tx_thresh = 0x20;
1446  rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1447 
1448  iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1449  iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1450 
1451  rhine_set_rx_mode(dev);
1452 
1453  if (rp->pdev->revision >= VT6105M)
1454  rhine_init_cam_filter(dev);
1455 
1456  napi_enable(&rp->napi);
1457 
1458  iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1459 
1460  iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1461  ioaddr + ChipCmd);
1462  rhine_check_media(dev, 1);
1463 }
1464 
1465 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1466 static void rhine_enable_linkmon(struct rhine_private *rp)
1467 {
1468  void __iomem *ioaddr = rp->base;
1469 
1470  iowrite8(0, ioaddr + MIICmd);
1471  iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1472  iowrite8(0x80, ioaddr + MIICmd);
1473 
1474  rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1475 
1476  iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1477 }
1478 
1479 /* Disable MII link status auto-polling (required for MDIO access) */
1480 static void rhine_disable_linkmon(struct rhine_private *rp)
1481 {
1482  void __iomem *ioaddr = rp->base;
1483 
1484  iowrite8(0, ioaddr + MIICmd);
1485 
1486  if (rp->quirks & rqRhineI) {
1487  iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1488 
1489  /* Can be called from ISR. Evil. */
1490  mdelay(1);
1491 
1492  /* 0x80 must be set immediately before turning it off */
1493  iowrite8(0x80, ioaddr + MIICmd);
1494 
1495  rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1496 
1497  /* Heh. Now clear 0x80 again. */
1498  iowrite8(0, ioaddr + MIICmd);
1499  }
1500  else
1501  rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1502 }
1503 
1504 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1505 
1506 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1507 {
1508  struct rhine_private *rp = netdev_priv(dev);
1509  void __iomem *ioaddr = rp->base;
1510  int result;
1511 
1512  rhine_disable_linkmon(rp);
1513 
1514  /* rhine_disable_linkmon already cleared MIICmd */
1515  iowrite8(phy_id, ioaddr + MIIPhyAddr);
1516  iowrite8(regnum, ioaddr + MIIRegAddr);
1517  iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1518  rhine_wait_bit_low(rp, MIICmd, 0x40);
1519  result = ioread16(ioaddr + MIIData);
1520 
1521  rhine_enable_linkmon(rp);
1522  return result;
1523 }
1524 
1525 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1526 {
1527  struct rhine_private *rp = netdev_priv(dev);
1528  void __iomem *ioaddr = rp->base;
1529 
1530  rhine_disable_linkmon(rp);
1531 
1532  /* rhine_disable_linkmon already cleared MIICmd */
1533  iowrite8(phy_id, ioaddr + MIIPhyAddr);
1534  iowrite8(regnum, ioaddr + MIIRegAddr);
1535  iowrite16(value, ioaddr + MIIData);
1536  iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1537  rhine_wait_bit_low(rp, MIICmd, 0x20);
1538 
1539  rhine_enable_linkmon(rp);
1540 }
1541 
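/*
 * Editorial note: both MDIO accessors above follow the same sequence: MII
 * auto-polling is stopped, the PHY and register addresses are programmed,
 * the read (0x40) or write (0x20) trigger bit in MIICmd is set, the code
 * waits for the chip to clear that bit, and auto-polling is re-enabled.
 */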
1542 static void rhine_task_disable(struct rhine_private *rp)
1543 {
1544  mutex_lock(&rp->task_lock);
1545  rp->task_enable = false;
1546  mutex_unlock(&rp->task_lock);
1547 
1548  cancel_work_sync(&rp->slow_event_task);
1549  cancel_work_sync(&rp->reset_task);
1550 }
1551 
1552 static void rhine_task_enable(struct rhine_private *rp)
1553 {
1554  mutex_lock(&rp->task_lock);
1555  rp->task_enable = true;
1556  mutex_unlock(&rp->task_lock);
1557 }
1558 
1559 static int rhine_open(struct net_device *dev)
1560 {
1561  struct rhine_private *rp = netdev_priv(dev);
1562  void __iomem *ioaddr = rp->base;
1563  int rc;
1564 
1565  rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1566  dev);
1567  if (rc)
1568  return rc;
1569 
1570  netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1571 
1572  rc = alloc_ring(dev);
1573  if (rc) {
1574  free_irq(rp->pdev->irq, dev);
1575  return rc;
1576  }
1577  alloc_rbufs(dev);
1578  alloc_tbufs(dev);
1579  rhine_chip_reset(dev);
1580  rhine_task_enable(rp);
1581  init_registers(dev);
1582 
1583  netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1584  __func__, ioread16(ioaddr + ChipCmd),
1585  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1586 
1587  netif_start_queue(dev);
1588 
1589  return 0;
1590 }
1591 
1592 static void rhine_reset_task(struct work_struct *work)
1593 {
1594  struct rhine_private *rp = container_of(work, struct rhine_private,
1595  reset_task);
1596  struct net_device *dev = rp->dev;
1597 
1598  mutex_lock(&rp->task_lock);
1599 
1600  if (!rp->task_enable)
1601  goto out_unlock;
1602 
1603  napi_disable(&rp->napi);
1604  spin_lock_bh(&rp->lock);
1605 
1606  /* clear all descriptors */
1607  free_tbufs(dev);
1608  free_rbufs(dev);
1609  alloc_tbufs(dev);
1610  alloc_rbufs(dev);
1611 
1612  /* Reinitialize the hardware. */
1613  rhine_chip_reset(dev);
1614  init_registers(dev);
1615 
1616  spin_unlock_bh(&rp->lock);
1617 
1618  dev->trans_start = jiffies; /* prevent tx timeout */
1619  dev->stats.tx_errors++;
1620  netif_wake_queue(dev);
1621 
1622 out_unlock:
1623  mutex_unlock(&rp->task_lock);
1624 }
1625 
1626 static void rhine_tx_timeout(struct net_device *dev)
1627 {
1628  struct rhine_private *rp = netdev_priv(dev);
1629  void __iomem *ioaddr = rp->base;
1630 
1631  netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1632  ioread16(ioaddr + IntrStatus),
1633  mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1634 
1635  schedule_work(&rp->reset_task);
1636 }
1637 
1638 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1639  struct net_device *dev)
1640 {
1641  struct rhine_private *rp = netdev_priv(dev);
1642  void __iomem *ioaddr = rp->base;
1643  unsigned entry;
1644 
1645  /* Caution: the write order is important here, set the field
1646  with the "ownership" bits last. */
1647 
1648  /* Calculate the next Tx descriptor entry. */
1649  entry = rp->cur_tx % TX_RING_SIZE;
1650 
1651  if (skb_padto(skb, ETH_ZLEN))
1652  return NETDEV_TX_OK;
1653 
1654  rp->tx_skbuff[entry] = skb;
1655 
1656  if ((rp->quirks & rqRhineI) &&
1657  (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1658  /* Must use alignment buffer. */
1659  if (skb->len > PKT_BUF_SZ) {
1660  /* packet too long, drop it */
1661  dev_kfree_skb(skb);
1662  rp->tx_skbuff[entry] = NULL;
1663  dev->stats.tx_dropped++;
1664  return NETDEV_TX_OK;
1665  }
1666 
1667  /* Padding is not copied and so must be redone. */
1668  skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1669  if (skb->len < ETH_ZLEN)
1670  memset(rp->tx_buf[entry] + skb->len, 0,
1671  ETH_ZLEN - skb->len);
1672  rp->tx_skbuff_dma[entry] = 0;
1673  rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1674  (rp->tx_buf[entry] -
1675  rp->tx_bufs));
1676  } else {
1677  rp->tx_skbuff_dma[entry] =
1678  pci_map_single(rp->pdev, skb->data, skb->len,
1679  PCI_DMA_TODEVICE);
1680  rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1681  }
1682 
1683  rp->tx_ring[entry].desc_length =
1684  cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1685 
1686  if (unlikely(vlan_tx_tag_present(skb))) {
1687  rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1688  /* request tagging */
1689  rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1690  }
1691  else
1692  rp->tx_ring[entry].tx_status = 0;
1693 
1694  /* lock eth irq */
1695  wmb();
1696  rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1697  wmb();
1698 
1699  rp->cur_tx++;
1700 
1701  /* Non-x86 Todo: explicitly flush cache lines here. */
1702 
1703  if (vlan_tx_tag_present(skb))
1704  /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1705  BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1706 
1707  /* Wake the potentially-idle transmit channel */
1708  iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1709  ioaddr + ChipCmd1);
1710  IOSYNC;
1711 
1712  if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1713  netif_stop_queue(dev);
1714 
1715  netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1716  rp->cur_tx - 1, entry);
1717 
1718  return NETDEV_TX_OK;
1719 }
1720 
1721 static void rhine_irq_disable(struct rhine_private *rp)
1722 {
1723  iowrite16(0x0000, rp->base + IntrEnable);
1724  mmiowb();
1725 }
1726 
1727 /* The interrupt handler does all of the Rx thread work and cleans up
1728  after the Tx thread. */
1729 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1730 {
1731  struct net_device *dev = dev_instance;
1732  struct rhine_private *rp = netdev_priv(dev);
1733  u32 status;
1734  int handled = 0;
1735 
1736  status = rhine_get_events(rp);
1737 
1738  netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1739 
1740  if (status & RHINE_EVENT) {
1741  handled = 1;
1742 
1743  rhine_irq_disable(rp);
1744  napi_schedule(&rp->napi);
1745  }
1746 
1747  if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1748  netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1749  status);
1750  }
1751 
1752  return IRQ_RETVAL(handled);
1753 }
1754 
1755 /* This routine is logically part of the interrupt handler, but isolated
1756  for clarity. */
1757 static void rhine_tx(struct net_device *dev)
1758 {
1759  struct rhine_private *rp = netdev_priv(dev);
1760  int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1761 
1762  /* find and cleanup dirty tx descriptors */
1763  while (rp->dirty_tx != rp->cur_tx) {
1764  txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1765  netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1766  entry, txstatus);
1767  if (txstatus & DescOwn)
1768  break;
1769  if (txstatus & 0x8000) {
1770  netif_dbg(rp, tx_done, dev,
1771  "Transmit error, Tx status %08x\n", txstatus);
1772  dev->stats.tx_errors++;
1773  if (txstatus & 0x0400)
1774  dev->stats.tx_carrier_errors++;
1775  if (txstatus & 0x0200)
1776  dev->stats.tx_window_errors++;
1777  if (txstatus & 0x0100)
1778  dev->stats.tx_aborted_errors++;
1779  if (txstatus & 0x0080)
1780  dev->stats.tx_heartbeat_errors++;
1781  if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1782  (txstatus & 0x0800) || (txstatus & 0x1000)) {
1783  dev->stats.tx_fifo_errors++;
1784  rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1785  break; /* Keep the skb - we try again */
1786  }
1787  /* Transmitter restarted in 'abnormal' handler. */
1788  } else {
1789  if (rp->quirks & rqRhineI)
1790  dev->stats.collisions += (txstatus >> 3) & 0x0F;
1791  else
1792  dev->stats.collisions += txstatus & 0x0F;
1793  netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1794  (txstatus >> 3) & 0xF, txstatus & 0xF);
1795  dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1796  dev->stats.tx_packets++;
1797  }
1798  /* Free the original skb. */
1799  if (rp->tx_skbuff_dma[entry]) {
1800  pci_unmap_single(rp->pdev,
1801  rp->tx_skbuff_dma[entry],
1802  rp->tx_skbuff[entry]->len,
1803  PCI_DMA_TODEVICE);
1804  }
1805  dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1806  rp->tx_skbuff[entry] = NULL;
1807  entry = (++rp->dirty_tx) % TX_RING_SIZE;
1808  }
1809  if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1810  netif_wake_queue(dev);
1811 }
1812 
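/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted TCI sits two bytes past the data area rounded up to
 * a multiple of four (see the trailer arithmetic in the function below).
 */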
1822 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1823 {
1824  u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1825  return be16_to_cpup((__be16 *)trailer);
1826 }
1827 
1828 /* Process up to limit frames from receive ring */
1829 static int rhine_rx(struct net_device *dev, int limit)
1830 {
1831  struct rhine_private *rp = netdev_priv(dev);
1832  int count;
1833  int entry = rp->cur_rx % RX_RING_SIZE;
1834 
1835  netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1836  entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1837 
1838  /* If EOP is set on the next entry, it's a new packet. Send it up. */
1839  for (count = 0; count < limit; ++count) {
1840  struct rx_desc *desc = rp->rx_head_desc;
1841  u32 desc_status = le32_to_cpu(desc->rx_status);
1842  u32 desc_length = le32_to_cpu(desc->desc_length);
1843  int data_size = desc_status >> 16;
1844 
1845  if (desc_status & DescOwn)
1846  break;
1847 
1848  netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1849  desc_status);
1850 
1851  if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1852  if ((desc_status & RxWholePkt) != RxWholePkt) {
1853  netdev_warn(dev,
1854  "Oversized Ethernet frame spanned multiple buffers, "
1855  "entry %#x length %d status %08x!\n",
1856  entry, data_size,
1857  desc_status);
1858  netdev_warn(dev,
1859  "Oversized Ethernet frame %p vs %p\n",
1860  rp->rx_head_desc,
1861  &rp->rx_ring[entry]);
1862  dev->stats.rx_length_errors++;
1863  } else if (desc_status & RxErr) {
1864  /* There was an error. */
1865  netif_dbg(rp, rx_err, dev,
1866  "%s() Rx error %08x\n", __func__,
1867  desc_status);
1868  dev->stats.rx_errors++;
1869  if (desc_status & 0x0030)
1870  dev->stats.rx_length_errors++;
1871  if (desc_status & 0x0048)
1872  dev->stats.rx_fifo_errors++;
1873  if (desc_status & 0x0004)
1874  dev->stats.rx_frame_errors++;
1875  if (desc_status & 0x0002) {
1876  /* this can also be updated outside the interrupt handler */
1877  spin_lock(&rp->lock);
1878  dev->stats.rx_crc_errors++;
1879  spin_unlock(&rp->lock);
1880  }
1881  }
1882  } else {
1883  struct sk_buff *skb = NULL;
1884  /* Length should omit the CRC */
1885  int pkt_len = data_size - 4;
1886  u16 vlan_tci = 0;
1887 
1888  /* Check if the packet is long enough to accept without
1889  copying to a minimally-sized skbuff. */
1890  if (pkt_len < rx_copybreak)
1891  skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1892  if (skb) {
1893  pci_dma_sync_single_for_cpu(rp->pdev,
1894  rp->rx_skbuff_dma[entry],
1895  rp->rx_buf_sz,
1896  PCI_DMA_FROMDEVICE);
1897 
1898  skb_copy_to_linear_data(skb,
1899  rp->rx_skbuff[entry]->data,
1900  pkt_len);
1901  skb_put(skb, pkt_len);
1902  pci_dma_sync_single_for_device(rp->pdev,
1903  rp->rx_skbuff_dma[entry],
1904  rp->rx_buf_sz,
1905  PCI_DMA_FROMDEVICE);
1906  } else {
1907  skb = rp->rx_skbuff[entry];
1908  if (skb == NULL) {
1909  netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1910  break;
1911  }
1912  rp->rx_skbuff[entry] = NULL;
1913  skb_put(skb, pkt_len);
1914  pci_unmap_single(rp->pdev,
1915  rp->rx_skbuff_dma[entry],
1916  rp->rx_buf_sz,
1917  PCI_DMA_FROMDEVICE);
1918  }
1919 
1920  if (unlikely(desc_length & DescTag))
1921  vlan_tci = rhine_get_vlan_tci(skb, data_size);
1922 
1923  skb->protocol = eth_type_trans(skb, dev);
1924 
1925  if (unlikely(desc_length & DescTag))
1926  __vlan_hwaccel_put_tag(skb, vlan_tci);
1927  netif_receive_skb(skb);
1928  dev->stats.rx_bytes += pkt_len;
1929  dev->stats.rx_packets++;
1930  }
1931  entry = (++rp->cur_rx) % RX_RING_SIZE;
1932  rp->rx_head_desc = &rp->rx_ring[entry];
1933  }
1934 
1935  /* Refill the Rx ring buffers. */
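 /* DescOwn is written only after a freshly mapped buffer's address is
    in the descriptor, so the chip never sees a ring slot without a
    valid buffer behind it. */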
1936  for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1937  struct sk_buff *skb;
1938  entry = rp->dirty_rx % RX_RING_SIZE;
1939  if (rp->rx_skbuff[entry] == NULL) {
1940  skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1941  rp->rx_skbuff[entry] = skb;
1942  if (skb == NULL)
1943  break; /* Better luck next round. */
1944  rp->rx_skbuff_dma[entry] =
1945  pci_map_single(rp->pdev, skb->data,
1946  rp->rx_buf_sz,
1947  PCI_DMA_FROMDEVICE);
1948  rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1949  }
1950  rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1951  }
1952 
1953  return count;
1954 }
1955 
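/*
 * Restart transmission after a Tx error: unless further error events
 * are already pending, point the chip back at the first descriptor it
 * has not completed (dirty_tx) and re-issue TxOn plus a demand poll.
 */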
1956 static void rhine_restart_tx(struct net_device *dev) {
1957  struct rhine_private *rp = netdev_priv(dev);
1958  void __iomem *ioaddr = rp->base;
1959  int entry = rp->dirty_tx % TX_RING_SIZE;
1960  u32 intr_status;
1961 
1962  /*
1963  * If new errors occurred, we need to sort them out before doing Tx.
1964  * In that case the ISR will be back here again soon anyway.
1965  */
1966  intr_status = rhine_get_events(rp);
1967 
1968  if ((intr_status & IntrTxErrSummary) == 0) {
1969 
1970  /* We know better than the chip where it should continue. */
1971  iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1972  ioaddr + TxRingPtr);
1973 
1974  iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1975  ioaddr + ChipCmd);
1976 
1977  if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1978  /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1979  BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1980 
1981  iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1982  ioaddr + ChipCmd1);
1983  IOSYNC;
1984  }
1985  else {
1986  /* This should never happen */
1987  netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
1988  intr_status);
1989  }
1990 
1991 }
1992 
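/*
 * Infrequent "slow" events (link changes, PCI errors) are handled here
 * in process context under task_lock; the NAPI poll is bounced at the
 * end so any Rx/Tx work that arrived in the meantime gets processed.
 */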
1993 static void rhine_slow_event_task(struct work_struct *work)
1994 {
1995  struct rhine_private *rp =
1996  container_of(work, struct rhine_private, slow_event_task);
1997  struct net_device *dev = rp->dev;
1998  u32 intr_status;
1999 
2000  mutex_lock(&rp->task_lock);
2001 
2002  if (!rp->task_enable)
2003  goto out_unlock;
2004 
2005  intr_status = rhine_get_events(rp);
2006  rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2007 
2008  if (intr_status & IntrLinkChange)
2009  rhine_check_media(dev, 0);
2010 
2011  if (intr_status & IntrPCIErr)
2012  netif_warn(rp, hw, dev, "PCI error\n");
2013 
2014  napi_disable(&rp->napi);
2015  rhine_irq_disable(rp);
2016  /* Slow and safe. Consider __napi_schedule as a replacement? */
2017  napi_enable(&rp->napi);
2018  napi_schedule(&rp->napi);
2019 
2020 out_unlock:
2021  mutex_unlock(&rp->task_lock);
2022 }
2023 
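/*
 * The chip keeps CRC-error and missed-frame counts in hardware
 * registers; fold them into dev->stats under the same lock used by the
 * interrupt-time error accounting before returning the structure.
 */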
2024 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
2025 {
2026  struct rhine_private *rp = netdev_priv(dev);
2027 
2028  spin_lock_bh(&rp->lock);
2029  rhine_update_rx_crc_and_missed_errord(rp);
2030  spin_unlock_bh(&rp->lock);
2031 
2032  return &dev->stats;
2033 }
2034 
2035 static void rhine_set_rx_mode(struct net_device *dev)
2036 {
2037  struct rhine_private *rp = netdev_priv(dev);
2038  void __iomem *ioaddr = rp->base;
2039  u32 mc_filter[2]; /* Multicast hash filter */
2040  u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
2041  struct netdev_hw_addr *ha;
2042 
2043  if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2044  rx_mode = 0x1C;
2045  iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2046  iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2047  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2048  (dev->flags & IFF_ALLMULTI)) {
2049  /* Too many to match, or accept all multicasts. */
2050  iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2051  iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2052  } else if (rp->pdev->revision >= VT6105M) {
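 /* VT6105M and newer provide multicast CAM entries: each address gets
    an exact-match slot and the mask enables only the slots actually
    programmed, avoiding the false positives of the hash filter used
    below for older chips. */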
2053  int i = 0;
2054  u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2055  netdev_for_each_mc_addr(ha, dev) {
2056  if (i == MCAM_SIZE)
2057  break;
2058  rhine_set_cam(ioaddr, i, ha->addr);
2059  mCAMmask |= 1 << i;
2060  i++;
2061  }
2062  rhine_set_cam_mask(ioaddr, mCAMmask);
2063  } else {
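 /* Older parts use a 64-bit hash filter: the top six bits of the
    Ethernet CRC of each address select one of 64 bit positions spread
    across the two 32-bit MulticastFilter registers. */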
2064  memset(mc_filter, 0, sizeof(mc_filter));
2065  netdev_for_each_mc_addr(ha, dev) {
2066  int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2067 
2068  mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2069  }
2070  iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2071  iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2072  }
2073  /* enable/disable VLAN receive filtering */
2074  if (rp->pdev->revision >= VT6105M) {
2075  if (dev->flags & IFF_PROMISC)
2076  BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2077  else
2078  BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2079  }
2080  BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2081 }
2082 
2083 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2084 {
2085  struct rhine_private *rp = netdev_priv(dev);
2086 
2087  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2088  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2089  strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2090 }
2091 
2092 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2093 {
2094  struct rhine_private *rp = netdev_priv(dev);
2095  int rc;
2096 
2097  mutex_lock(&rp->task_lock);
2098  rc = mii_ethtool_gset(&rp->mii_if, cmd);
2099  mutex_unlock(&rp->task_lock);
2100 
2101  return rc;
2102 }
2103 
2104 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2105 {
2106  struct rhine_private *rp = netdev_priv(dev);
2107  int rc;
2108 
2109  mutex_lock(&rp->task_lock);
2110  rc = mii_ethtool_sset(&rp->mii_if, cmd);
2111  rhine_set_carrier(&rp->mii_if);
2112  mutex_unlock(&rp->task_lock);
2113 
2114  return rc;
2115 }
2116 
2117 static int netdev_nway_reset(struct net_device *dev)
2118 {
2119  struct rhine_private *rp = netdev_priv(dev);
2120 
2121  return mii_nway_restart(&rp->mii_if);
2122 }
2123 
2124 static u32 netdev_get_link(struct net_device *dev)
2125 {
2126  struct rhine_private *rp = netdev_priv(dev);
2127 
2128  return mii_link_ok(&rp->mii_if);
2129 }
2130 
2131 static u32 netdev_get_msglevel(struct net_device *dev)
2132 {
2133  struct rhine_private *rp = netdev_priv(dev);
2134 
2135  return rp->msg_enable;
2136 }
2137 
2138 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2139 {
2140  struct rhine_private *rp = netdev_priv(dev);
2141 
2142  rp->msg_enable = value;
2143 }
2144 
2145 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2146 {
2147  struct rhine_private *rp = netdev_priv(dev);
2148 
2149  if (!(rp->quirks & rqWOL))
2150  return;
2151 
2152  spin_lock_irq(&rp->lock);
2153  wol->supported = WAKE_PHY | WAKE_MAGIC |
2154  WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2155  wol->wolopts = rp->wolopts;
2156  spin_unlock_irq(&rp->lock);
2157 }
2158 
2159 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2160 {
2161  struct rhine_private *rp = netdev_priv(dev);
2162  u32 support = WAKE_PHY | WAKE_MAGIC |
2163  WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2164 
2165  if (!(rp->quirks & rqWOL))
2166  return -EINVAL;
2167 
2168  if (wol->wolopts & ~support)
2169  return -EINVAL;
2170 
2171  spin_lock_irq(&rp->lock);
2172  rp->wolopts = wol->wolopts;
2173  spin_unlock_irq(&rp->lock);
2174 
2175  return 0;
2176 }
2177 
2178 static const struct ethtool_ops netdev_ethtool_ops = {
2179  .get_drvinfo = netdev_get_drvinfo,
2180  .get_settings = netdev_get_settings,
2181  .set_settings = netdev_set_settings,
2182  .nway_reset = netdev_nway_reset,
2183  .get_link = netdev_get_link,
2184  .get_msglevel = netdev_get_msglevel,
2185  .set_msglevel = netdev_set_msglevel,
2186  .get_wol = rhine_get_wol,
2187  .set_wol = rhine_set_wol,
2188 };
2189 
2190 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2191 {
2192  struct rhine_private *rp = netdev_priv(dev);
2193  int rc;
2194 
2195  if (!netif_running(dev))
2196  return -EINVAL;
2197 
2198  mutex_lock(&rp->task_lock);
2199  rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2200  rhine_set_carrier(&rp->mii_if);
2201  mutex_unlock(&rp->task_lock);
2202 
2203  return rc;
2204 }
2205 
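/*
 * Tear-down order matters here: stop the deferred work, NAPI and the Tx
 * queue first, quiesce the chip, and only then release the IRQ and the
 * DMA buffers so nothing can touch memory that is about to be freed.
 */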
2206 static int rhine_close(struct net_device *dev)
2207 {
2208  struct rhine_private *rp = netdev_priv(dev);
2209  void __iomem *ioaddr = rp->base;
2210 
2211  rhine_task_disable(rp);
2212  napi_disable(&rp->napi);
2213  netif_stop_queue(dev);
2214 
2215  netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2216  ioread16(ioaddr + ChipCmd));
2217 
2218  /* Switch to loopback mode to avoid hardware races. */
2219  iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2220 
2221  rhine_irq_disable(rp);
2222 
2223  /* Stop the chip's Tx and Rx processes. */
2224  iowrite16(CmdStop, ioaddr + ChipCmd);
2225 
2226  free_irq(rp->pdev->irq, dev);
2227  free_rbufs(dev);
2228  free_tbufs(dev);
2229  free_ring(dev);
2230 
2231  return 0;
2232 }
2233 
2234 
2235 static void __devexit rhine_remove_one(struct pci_dev *pdev)
2236 {
2237  struct net_device *dev = pci_get_drvdata(pdev);
2238  struct rhine_private *rp = netdev_priv(dev);
2239 
2240  unregister_netdev(dev);
2241 
2242  pci_iounmap(pdev, rp->base);
2243  pci_release_regions(pdev);
2244 
2245  free_netdev(dev);
2246  pci_disable_device(pdev);
2247  pci_set_drvdata(pdev, NULL);
2248 }
2249 
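/*
 * Arm Wake-on-LAN according to the ethtool-selected options before the
 * machine goes down; if the system is powering off (and the BIOS is not
 * on the broken-D3 list) the chip is left in D3hot with wake enabled.
 */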
2250 static void rhine_shutdown (struct pci_dev *pdev)
2251 {
2252  struct net_device *dev = pci_get_drvdata(pdev);
2253  struct rhine_private *rp = netdev_priv(dev);
2254  void __iomem *ioaddr = rp->base;
2255 
2256  if (!(rp->quirks & rqWOL))
2257  return; /* Nothing to do for non-WOL adapters */
2258 
2259  rhine_power_init(dev);
2260 
2261  /* Make sure we use pattern 0, 1 and not 4, 5 */
2262  if (rp->quirks & rq6patterns)
2263  iowrite8(0x04, ioaddr + WOLcgClr);
2264 
2265  spin_lock(&rp->lock);
2266 
2267  if (rp->wolopts & WAKE_MAGIC) {
2268  iowrite8(WOLmagic, ioaddr + WOLcrSet);
2269  /*
2270  * Turn EEPROM-controlled wake-up back on -- some hardware may
2271  * not cooperate otherwise.
2272  */
2273  iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2274  }
2275 
2276  if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2277  iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2278 
2279  if (rp->wolopts & WAKE_PHY)
2280  iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2281 
2282  if (rp->wolopts & WAKE_UCAST)
2283  iowrite8(WOLucast, ioaddr + WOLcrSet);
2284 
2285  if (rp->wolopts) {
2286  /* Enable legacy WOL (for old motherboards) */
2287  iowrite8(0x01, ioaddr + PwcfgSet);
2288  iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2289  }
2290 
2291  spin_unlock(&rp->lock);
2292 
2293  if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2294  iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2295 
2296  pci_wake_from_d3(pdev, true);
2297  pci_set_power_state(pdev, PCI_D3hot);
2298  }
2299 }
2300 
2301 #ifdef CONFIG_PM_SLEEP
2302 static int rhine_suspend(struct device *device)
2303 {
2304  struct pci_dev *pdev = to_pci_dev(device);
2305  struct net_device *dev = pci_get_drvdata(pdev);
2306  struct rhine_private *rp = netdev_priv(dev);
2307 
2308  if (!netif_running(dev))
2309  return 0;
2310 
2311  rhine_task_disable(rp);
2312  rhine_irq_disable(rp);
2313  napi_disable(&rp->napi);
2314 
2315  netif_device_detach(dev);
2316 
2317  rhine_shutdown(pdev);
2318 
2319  return 0;
2320 }
2321 
2322 static int rhine_resume(struct device *device)
2323 {
2324  struct pci_dev *pdev = to_pci_dev(device);
2325  struct net_device *dev = pci_get_drvdata(pdev);
2326  struct rhine_private *rp = netdev_priv(dev);
2327 
2328  if (!netif_running(dev))
2329  return 0;
2330 
2331 #ifdef USE_MMIO
2332  enable_mmio(rp->pioaddr, rp->quirks);
2333 #endif
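 /* The chip may have lost power across suspend, so rebuild the Tx/Rx
    rings from scratch and reprogram all registers instead of trusting
    any pre-suspend state. */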
2334  rhine_power_init(dev);
2335  free_tbufs(dev);
2336  free_rbufs(dev);
2337  alloc_tbufs(dev);
2338  alloc_rbufs(dev);
2339  rhine_task_enable(rp);
2340  spin_lock_bh(&rp->lock);
2341  init_registers(dev);
2342  spin_unlock_bh(&rp->lock);
2343 
2344  netif_device_attach(dev);
2345 
2346  return 0;
2347 }
2348 
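/*
 * SIMPLE_DEV_PM_OPS maps the system-sleep paths (suspend/resume and
 * hibernation's freeze/thaw/poweroff/restore) onto the two callbacks
 * above, so hibernation reuses the same code as suspend.
 */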
2349 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2350 #define RHINE_PM_OPS (&rhine_pm_ops)
2351 
2352 #else
2353 
2354 #define RHINE_PM_OPS NULL
2355 
2356 #endif /* !CONFIG_PM_SLEEP */
2357 
2358 static struct pci_driver rhine_driver = {
2359  .name = DRV_NAME,
2360  .id_table = rhine_pci_tbl,
2361  .probe = rhine_init_one,
2362  .remove = __devexit_p(rhine_remove_one),
2363  .shutdown = rhine_shutdown,
2364  .driver.pm = RHINE_PM_OPS,
2365 };
2366 
2367 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2368  {
2369  .ident = "EPIA-M",
2370  .matches = {
2371  DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2372  DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2373  },
2374  },
2375  {
2376  .ident = "KV7",
2377  .matches = {
2378  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2379  DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2380  },
2381  },
2382  { NULL }
2383 };
2384 
2385 static int __init rhine_init(void)
2386 {
2387 /* when a module, this is printed whether or not devices are found in probe */
2388 #ifdef MODULE
2389  pr_info("%s\n", version);
2390 #endif
2391  if (dmi_check_system(rhine_dmi_table)) {
2392  /* these BIOSes fail at PXE boot if chip is in D3 */
2393  avoid_D3 = true;
2394  pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2395  }
2396  else if (avoid_D3)
2397  pr_info("avoid_D3 set\n");
2398 
2399  return pci_register_driver(&rhine_driver);
2400 }
2401 
2402 
2403 static void __exit rhine_cleanup(void)
2404 {
2405  pci_unregister_driver(&rhine_driver);
2406 }
2407 
2408 
2409 module_init(rhine_init);
2410 module_exit(rhine_cleanup);