Linux Kernel 3.7.1
sis190.c
1 /*
2  sis190.c: Silicon Integrated Systems SiS190 ethernet driver
3 
4  Copyright (c) 2003 K.M. Liu <[email protected]>
5  Copyright (c) 2003, 2004 Jeff Garzik <[email protected]>
6  Copyright (c) 2003, 2004, 2005 Francois Romieu <[email protected]>
7 
8  Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
9  genuine driver.
10 
11  This software may be used and distributed according to the terms of
12  the GNU General Public License (GPL), incorporated herein by reference.
13  Drivers based on or derived from this code fall under the GPL and must
14  retain the authorship, copyright and license notice. This file is not
15  a complete program and may only be used when the entire operating
16  system is licensed under the GPL.
17 
18  See the file COPYING in this distribution for more information.
19 
20 */
21 
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 
24 #include <linux/interrupt.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/netdevice.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/etherdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/pci.h>
32 #include <linux/mii.h>
33 #include <linux/delay.h>
34 #include <linux/crc32.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/slab.h>
37 #include <asm/irq.h>
38 
39 #define PHY_MAX_ADDR 32
40 #define PHY_ID_ANY 0x1f
41 #define MII_REG_ANY 0x1f
42 
43 #define DRV_VERSION "1.4"
44 #define DRV_NAME "sis190"
45 #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
46 
47 #define sis190_rx_skb netif_rx
48 #define sis190_rx_quota(count, quota) count
49 
50 #define NUM_TX_DESC 64 /* [8..1024] */
51 #define NUM_RX_DESC 64 /* [8..8192] */
52 #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
53 #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
54 #define RX_BUF_SIZE 1536
55 #define RX_BUF_MASK 0xfff8
56 
57 #define SIS190_REGS_SIZE 0x80
58 #define SIS190_TX_TIMEOUT (6*HZ)
59 #define SIS190_PHY_TIMEOUT (10*HZ)
60 #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
61  NETIF_MSG_LINK | NETIF_MSG_IFUP | \
62  NETIF_MSG_IFDOWN)
63 
64 /* Enhanced PHY access register bit definitions */
65 #define EhnMIIread 0x0000
66 #define EhnMIIwrite 0x0020
67 #define EhnMIIdataShift 16
68 #define EhnMIIpmdShift 6 /* 7016 only */
69 #define EhnMIIregShift 11
70 #define EhnMIIreq 0x0010
71 #define EhnMIInotDone 0x0010
72 
73 /* Write/read MMIO register */
74 #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg))
75 #define SIS_W16(reg, val) writew ((val), ioaddr + (reg))
76 #define SIS_W32(reg, val) writel ((val), ioaddr + (reg))
77 #define SIS_R8(reg) readb (ioaddr + (reg))
78 #define SIS_R16(reg) readw (ioaddr + (reg))
79 #define SIS_R32(reg) readl (ioaddr + (reg))
80 
81 #define SIS_PCI_COMMIT() SIS_R32(IntrControl)
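/*
 * SIS_PCI_COMMIT() flushes posted MMIO writes: reading IntrControl back
 * forces the preceding SIS_W* writes to reach the chip before the driver
 * continues (see sis190_irq_mask_and_ack() and sis190_hw_start() below).
 */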
82 
83 enum sis190_registers {
84  TxControl = 0x00,
85  TxDescStartAddr = 0x04,
86  rsv0 = 0x08, // reserved
87  TxSts = 0x0c, // unused (Control/Status)
88  RxControl = 0x10,
89  RxDescStartAddr = 0x14,
90  rsv1 = 0x18, // reserved
91  RxSts = 0x1c, // unused
92  IntrStatus = 0x20,
93  IntrMask = 0x24,
94  IntrControl = 0x28,
95  IntrTimer = 0x2c, // unused (Interrupt Timer)
96  PMControl = 0x30, // unused (Power Mgmt Control/Status)
97  rsv2 = 0x34, // reserved
98  ROMControl = 0x38,
99  ROMInterface = 0x3c,
100  StationControl = 0x40,
101  GMIIControl = 0x44,
102  GIoCR = 0x48, // unused (GMAC IO Compensation)
103  GIoCtrl = 0x4c, // unused (GMAC IO Control)
104  TxMacControl = 0x50,
105  TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit)
106  RGDelay = 0x58, // unused (RGMII Tx Internal Delay)
107  rsv3 = 0x5c, // reserved
108  RxMacControl = 0x60,
109  RxMacAddr = 0x62,
110  RxHashTable = 0x68,
111  // Undocumented = 0x6c,
112  RxWolCtrl = 0x70,
113  RxWolData = 0x74, // unused (Rx WOL Data Access)
114  RxMPSControl = 0x78, // unused (Rx MPS Control)
115  rsv4 = 0x7c, // reserved
116 };
117 
118 enum sis190_register_content {
119  /* IntrStatus */
120  SoftInt = 0x40000000, // unused
121  Timeup = 0x20000000, // unused
122  PauseFrame = 0x00080000, // unused
123  MagicPacket = 0x00040000, // unused
124  WakeupFrame = 0x00020000, // unused
125  LinkChange = 0x00010000,
126  RxQEmpty = 0x00000080,
127  RxQInt = 0x00000040,
128  TxQ1Empty = 0x00000020, // unused
129  TxQ1Int = 0x00000010,
130  TxQ0Empty = 0x00000008, // unused
131  TxQ0Int = 0x00000004,
132  RxHalt = 0x00000002,
133  TxHalt = 0x00000001,
134 
135  /* {Rx/Tx}CmdBits */
136  CmdReset = 0x10,
137  CmdRxEnb = 0x08, // unused
138  CmdTxEnb = 0x01,
139  RxBufEmpty = 0x01, // unused
140 
141  /* Cfg9346Bits */
142  Cfg9346_Lock = 0x00, // unused
143  Cfg9346_Unlock = 0xc0, // unused
144 
145  /* RxMacControl */
146  AcceptErr = 0x20, // unused
147  AcceptRunt = 0x10, // unused
148  AcceptBroadcast = 0x0800,
149  AcceptMulticast = 0x0400,
150  AcceptMyPhys = 0x0200,
151  AcceptAllPhys = 0x0100,
152 
153  /* RxConfigBits */
155  RxCfgDMAShift = 8, // 0x1a in RxControl ?
156 
157  /* TxConfigBits */
159  TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
160 
161  LinkStatus = 0x02, // unused
162  FullDup = 0x01, // unused
163 
164  /* TBICSRBit */
165  TBILinkOK = 0x02000000, // unused
166 };
167 
168 struct TxDesc {
169  __le32 PSize;
170  __le32 status;
171  __le32 addr;
172  __le32 size;
173 };
174 
175 struct RxDesc {
176  __le32 PSize;
177  __le32 status;
178  __le32 addr;
179  __le32 size;
180 };
181 
182 enum _DescStatusBits {
183  /* _Desc.status */
184  OWNbit = 0x80000000, // RXOWN/TXOWN
185  INTbit = 0x40000000, // RXINT/TXINT
186  CRCbit = 0x00020000, // CRCOFF/CRCEN
187  PADbit = 0x00010000, // PREADD/PADEN
188  /* _Desc.size */
189  RingEnd = 0x80000000,
190  /* TxDesc.status */
191  LSEN = 0x08000000, // TSO ? -- FR
192  IPCS = 0x04000000,
193  TCPCS = 0x02000000,
194  UDPCS = 0x01000000,
195  BSTEN = 0x00800000,
196  EXTEN = 0x00400000,
197  DEFEN = 0x00200000,
198  BKFEN = 0x00100000,
199  CRSEN = 0x00080000,
200  COLEN = 0x00040000,
201  THOL3 = 0x30000000,
202  THOL2 = 0x20000000,
203  THOL1 = 0x10000000,
204  THOL0 = 0x00000000,
205 
206  WND = 0x00080000,
207  TABRT = 0x00040000,
208  FIFO = 0x00020000,
209  LINK = 0x00010000,
210  ColCountMask = 0x0000ffff,
211  /* RxDesc.status */
212  IPON = 0x20000000,
213  TCPON = 0x10000000,
214  UDPON = 0x08000000,
215  Wakup = 0x00400000,
216  Magic = 0x00200000,
217  Pause = 0x00100000,
218  DEFbit = 0x00200000,
219  BCAST = 0x000c0000,
220  MCAST = 0x00080000,
221  UCAST = 0x00040000,
222  /* RxDesc.PSize */
223  TAGON = 0x80000000,
224  RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? -- FR
225  ABORT = 0x00800000,
226  SHORT = 0x00400000,
227  LIMIT = 0x00200000,
228  MIIER = 0x00100000,
229  OVRUN = 0x00080000,
230  NIBON = 0x00040000,
231  COLON = 0x00020000,
232  CRCOK = 0x00010000,
233  RxSizeMask = 0x0000ffff
234  /*
235  * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
236  * provide two (unused with Linux) Tx queues. No publicly
237  * available documentation alas.
238  */
239 };
240 
241 enum sis190_eeprom_access_register_bits {
242  EECS = 0x00000001, // unused
243  EECLK = 0x00000002, // unused
244  EEDO = 0x00000008, // unused
245  EEDI = 0x00000004, // unused
246  EEREQ = 0x00000080,
247  EEROP = 0x00000200,
248  EEWOP = 0x00000100 // unused
249 };
250 
251 /* EEPROM Addresses */
252 enum sis190_eeprom_address {
253  EEPROMSignature = 0x00,
254  EEPROMCLK = 0x01, // unused
255  EEPROMInfo = 0x02,
256  EEPROMMACAddr = 0x03
257 };
258 
259 enum sis190_feature {
260  F_HAS_RGMII = 1,
261  F_PHY_88E1111 = 2,
262  F_PHY_BCM5461 = 4,
263 };
264 
265 struct sis190_private {
266  void __iomem *mmio_addr;
267  struct pci_dev *pci_dev;
268  struct net_device *dev;
269  spinlock_t lock;
270  u32 rx_buf_sz;
271  u32 cur_rx;
272  u32 cur_tx;
273  u32 dirty_rx;
274  u32 dirty_tx;
275  dma_addr_t rx_dma;
276  dma_addr_t tx_dma;
277  struct RxDesc *RxDescRing;
278  struct TxDesc *TxDescRing;
279  struct sk_buff *Rx_skbuff[NUM_RX_DESC];
280  struct sk_buff *Tx_skbuff[NUM_TX_DESC];
281  struct work_struct phy_task;
282  struct timer_list timer;
283  u32 msg_enable;
284  struct mii_if_info mii_if;
285  struct list_head first_phy;
286  u32 features;
287  u32 negotiated_lpa;
288  enum {
289   LNK_OFF,
290   LNK_ON,
291   LNK_AUTONEG,
292  } link_status;
293 };
294 
295 struct sis190_phy {
296  struct list_head list;
297  int phy_id;
298  u16 id[2];
299  u16 status;
300  u8 type;
301 };
302 
303 enum sis190_phy_type {
304  UNKNOWN = 0x00,
305  HOME = 0x01,
306  LAN = 0x02,
307  MIX = 0x03
308 };
309 
310 static struct mii_chip_info {
311  const char *name;
312  u16 id[2];
313  unsigned int type;
314  u32 feature;
315 } mii_chip_table[] = {
316  { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 },
317  { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
318  { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
319  { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
320  { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
321  { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
322  { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 },
323  { NULL, }
324 };
325 
326 static const struct {
327  const char *name;
328 } sis_chip_info[] = {
329  { "SiS 190 PCI Fast Ethernet adapter" },
330  { "SiS 191 PCI Gigabit Ethernet adapter" },
331 };
332 
333 static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
334  { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
335  { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
336  { 0, },
337 };
338 
339 MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
340 
341 static int rx_copybreak = 200;
342 
343 static struct {
344  int msg_enable;
345 } debug = { -1 };
346 
347 MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
348 module_param(rx_copybreak, int, 0);
349 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
350 module_param_named(debug, debug.msg_enable, int, 0);
351 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
352 MODULE_AUTHOR("K.M. Liu <[email protected]>, Ueimor <[email protected]>");
354 MODULE_LICENSE("GPL");
355 
356 static const u32 sis190_intr_mask =
357  RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;
358 
359 /*
360  * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
361  * The chips use a 64 element hash table based on the Ethernet CRC.
362  */
363 static const int multicast_filter_limit = 32;
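/*
 * sis190_set_rx_mode() below maps each multicast address onto one of the
 * 64 hash bits: bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f, then
 * mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31), and the two 32 bit halves
 * are written to RxHashTable and RxHashTable + 4.
 */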
364 
365 static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
366 {
367  unsigned int i;
368 
369  SIS_W32(GMIIControl, ctl);
370 
371  msleep(1);
372 
373  for (i = 0; i < 100; i++) {
374  if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
375  break;
376  msleep(1);
377  }
378 
379  if (i > 99)
380  pr_err("PHY command failed !\n");
381 }
382 
383 static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
384 {
385  __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
386  (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
387  (((u32) val) << EhnMIIdataShift));
388 }
389 
390 static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
391 {
392  __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
393  (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
394 
395  return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
396 }
397 
398 static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
399 {
400  struct sis190_private *tp = netdev_priv(dev);
401 
402  mdio_write(tp->mmio_addr, phy_id, reg, val);
403 }
404 
405 static int __mdio_read(struct net_device *dev, int phy_id, int reg)
406 {
407  struct sis190_private *tp = netdev_priv(dev);
408 
409  return mdio_read(tp->mmio_addr, phy_id, reg);
410 }
411 
412 static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
413 {
414  mdio_read(ioaddr, phy_id, reg);
415  return mdio_read(ioaddr, phy_id, reg);
416 }
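/*
 * mdio_read_latched() reads the register twice because some MII status
 * bits (e.g. the BMSR link status) are latched and only reflect the
 * current state on the second read.
 */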
417 
418 static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419 {
420  u16 data = 0xffff;
421  unsigned int i;
422 
423  if (!(SIS_R32(ROMControl) & 0x0002))
424  return 0;
425 
426  SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
427 
428  for (i = 0; i < 200; i++) {
429  if (!(SIS_R32(ROMInterface) & EEREQ)) {
430  data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
431  break;
432  }
433  msleep(1);
434  }
435 
436  return data;
437 }
438 
439 static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
440 {
441  SIS_W32(IntrMask, 0x00);
442  SIS_W32(IntrStatus, 0xffffffff);
443  SIS_PCI_COMMIT();
444 }
445 
446 static void sis190_asic_down(void __iomem *ioaddr)
447 {
448  /* Stop the chip's Tx and Rx DMA processes. */
449 
450  SIS_W32(TxControl, 0x1a00);
451  SIS_W32(RxControl, 0x1a00);
452 
453  sis190_irq_mask_and_ack(ioaddr);
454 }
455 
456 static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
457 {
458  desc->size |= cpu_to_le32(RingEnd);
459 }
460 
461 static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
462 {
463  u32 eor = le32_to_cpu(desc->size) & RingEnd;
464 
465  desc->PSize = 0x0;
466  desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
467  wmb();
468  desc->status = cpu_to_le32(OWNbit | INTbit);
469 }
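/*
 * Note the ordering in sis190_give_to_asic(): size and PSize are written
 * first, then wmb() makes them visible before OWNbit hands the descriptor
 * back to the NIC.
 */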
470 
471 static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
472  u32 rx_buf_sz)
473 {
474  desc->addr = cpu_to_le32(mapping);
475  sis190_give_to_asic(desc, rx_buf_sz);
476 }
477 
478 static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
479 {
480  desc->PSize = 0x0;
481  desc->addr = cpu_to_le32(0xdeadbeef);
482  desc->size &= cpu_to_le32(RingEnd);
483  wmb();
484  desc->status = 0x0;
485 }
486 
487 static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
488  struct RxDesc *desc)
489 {
490  u32 rx_buf_sz = tp->rx_buf_sz;
491  struct sk_buff *skb;
492  dma_addr_t mapping;
493 
494  skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
495  if (unlikely(!skb))
496  goto skb_alloc_failed;
497  mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
498  PCI_DMA_FROMDEVICE);
499  if (pci_dma_mapping_error(tp->pci_dev, mapping))
500  goto out;
501  sis190_map_to_asic(desc, mapping, rx_buf_sz);
502 
503  return skb;
504 
505 out:
506  dev_kfree_skb_any(skb);
507 skb_alloc_failed:
508  sis190_make_unusable_by_asic(desc);
509  return NULL;
510 }
511 
512 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
513  u32 start, u32 end)
514 {
515  u32 cur;
516 
517  for (cur = start; cur < end; cur++) {
518  unsigned int i = cur % NUM_RX_DESC;
519 
520  if (tp->Rx_skbuff[i])
521  continue;
522 
523  tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
524 
525  if (!tp->Rx_skbuff[i])
526  break;
527  }
528  return cur - start;
529 }
530 
531 static bool sis190_try_rx_copy(struct sis190_private *tp,
532  struct sk_buff **sk_buff, int pkt_size,
533  dma_addr_t addr)
534 {
535  struct sk_buff *skb;
536  bool done = false;
537 
538  if (pkt_size >= rx_copybreak)
539  goto out;
540 
541  skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
542  if (!skb)
543  goto out;
544 
545  pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
546  PCI_DMA_FROMDEVICE);
547  skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
548  *sk_buff = skb;
549  done = true;
550 out:
551  return done;
552 }
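/*
 * Frames shorter than rx_copybreak are copied into a freshly allocated
 * skb, so the original receive buffer stays mapped and is handed back to
 * the NIC by sis190_rx_interrupt(); larger frames are passed up directly
 * and the ring slot is refilled later by sis190_rx_fill().
 */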
553 
554 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
555 {
556 #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
557 
558  if ((status & CRCOK) && !(status & ErrMask))
559  return 0;
560 
561  if (!(status & CRCOK))
562  stats->rx_crc_errors++;
563  else if (status & OVRUN)
564  stats->rx_over_errors++;
565  else if (status & (SHORT | LIMIT))
566  stats->rx_length_errors++;
567  else if (status & (MIIER | NIBON | COLON))
568  stats->rx_frame_errors++;
569 
570  stats->rx_errors++;
571  return -1;
572 }
573 
574 static int sis190_rx_interrupt(struct net_device *dev,
575  struct sis190_private *tp, void __iomem *ioaddr)
576 {
577  struct net_device_stats *stats = &dev->stats;
578  u32 rx_left, cur_rx = tp->cur_rx;
579  u32 delta, count;
580 
581  rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
582  rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
583 
584  for (; rx_left > 0; rx_left--, cur_rx++) {
585  unsigned int entry = cur_rx % NUM_RX_DESC;
586  struct RxDesc *desc = tp->RxDescRing + entry;
587  u32 status;
588 
589  if (le32_to_cpu(desc->status) & OWNbit)
590  break;
591 
592  status = le32_to_cpu(desc->PSize);
593 
594  //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
595 
596  if (sis190_rx_pkt_err(status, stats) < 0)
597  sis190_give_to_asic(desc, tp->rx_buf_sz);
598  else {
599  struct sk_buff *skb = tp->Rx_skbuff[entry];
600  dma_addr_t addr = le32_to_cpu(desc->addr);
601  int pkt_size = (status & RxSizeMask) - 4;
602  struct pci_dev *pdev = tp->pci_dev;
603 
604  if (unlikely(pkt_size > tp->rx_buf_sz)) {
605  netif_info(tp, intr, dev,
606  "(frag) status = %08x\n", status);
607  stats->rx_dropped++;
608  stats->rx_length_errors++;
609  sis190_give_to_asic(desc, tp->rx_buf_sz);
610  continue;
611  }
612 
613 
614  if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
615  pci_dma_sync_single_for_device(pdev, addr,
616  tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
617  sis190_give_to_asic(desc, tp->rx_buf_sz);
618  } else {
619  pci_unmap_single(pdev, addr, tp->rx_buf_sz,
620  PCI_DMA_FROMDEVICE);
621  tp->Rx_skbuff[entry] = NULL;
622  sis190_make_unusable_by_asic(desc);
623  }
624 
625  skb_put(skb, pkt_size);
626  skb->protocol = eth_type_trans(skb, dev);
627 
628  sis190_rx_skb(skb);
629 
630  stats->rx_packets++;
631  stats->rx_bytes += pkt_size;
632  if ((status & BCAST) == MCAST)
633  stats->multicast++;
634  }
635  }
636  count = cur_rx - tp->cur_rx;
637  tp->cur_rx = cur_rx;
638 
639  delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
640  if (!delta && count)
641  netif_info(tp, intr, dev, "no Rx buffer allocated\n");
642  tp->dirty_rx += delta;
643 
644  if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
645  netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
646 
647  return count;
648 }
649 
650 static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
651  struct TxDesc *desc)
652 {
653  unsigned int len;
654 
655  len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
656 
657  pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
658 
659  memset(desc, 0x00, sizeof(*desc));
660 }
661 
662 static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
663 {
664 #define TxErrMask (WND | TABRT | FIFO | LINK)
665 
666  if (!unlikely(status & TxErrMask))
667  return 0;
668 
669  if (status & WND)
670  stats->tx_window_errors++;
671  if (status & TABRT)
672  stats->tx_aborted_errors++;
673  if (status & FIFO)
674  stats->tx_fifo_errors++;
675  if (status & LINK)
676  stats->tx_carrier_errors++;
677 
678  stats->tx_errors++;
679 
680  return -1;
681 }
682 
683 static void sis190_tx_interrupt(struct net_device *dev,
684  struct sis190_private *tp, void __iomem *ioaddr)
685 {
686  struct net_device_stats *stats = &dev->stats;
687  u32 pending, dirty_tx = tp->dirty_tx;
688  /*
689  * It would not be needed if queueing was allowed to be enabled
690  * again too early (hint: think preempt and unclocked smp systems).
691  */
692  unsigned int queue_stopped;
693 
694  smp_rmb();
695  pending = tp->cur_tx - dirty_tx;
696  queue_stopped = (pending == NUM_TX_DESC);
697 
698  for (; pending; pending--, dirty_tx++) {
699  unsigned int entry = dirty_tx % NUM_TX_DESC;
700  struct TxDesc *txd = tp->TxDescRing + entry;
701  u32 status = le32_to_cpu(txd->status);
702  struct sk_buff *skb;
703 
704  if (status & OWNbit)
705  break;
706 
707  skb = tp->Tx_skbuff[entry];
708 
709  if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
710  stats->tx_packets++;
711  stats->tx_bytes += skb->len;
712  stats->collisions += ((status & ColCountMask) - 1);
713  }
714 
715  sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
716  tp->Tx_skbuff[entry] = NULL;
717  dev_kfree_skb_irq(skb);
718  }
719 
720  if (tp->dirty_tx != dirty_tx) {
721  tp->dirty_tx = dirty_tx;
722  smp_wmb();
723  if (queue_stopped)
724  netif_wake_queue(dev);
725  }
726 }
727 
728 /*
729  * The interrupt handler does all of the Rx thread work and cleans up after
730  * the Tx thread.
731  */
732 static irqreturn_t sis190_irq(int irq, void *__dev)
733 {
734  struct net_device *dev = __dev;
735  struct sis190_private *tp = netdev_priv(dev);
736  void __iomem *ioaddr = tp->mmio_addr;
737  unsigned int handled = 0;
738  u32 status;
739 
740  status = SIS_R32(IntrStatus);
741 
742  if ((status == 0xffffffff) || !status)
743  goto out;
744 
745  handled = 1;
746 
747  if (unlikely(!netif_running(dev))) {
748  sis190_asic_down(ioaddr);
749  goto out;
750  }
751 
752  SIS_W32(IntrStatus, status);
753 
754 // netif_info(tp, intr, dev, "status = %08x\n", status);
755 
756  if (status & LinkChange) {
757  netif_info(tp, intr, dev, "link change\n");
758  del_timer(&tp->timer);
759  schedule_work(&tp->phy_task);
760  }
761 
762  if (status & RxQInt)
763  sis190_rx_interrupt(dev, tp, ioaddr);
764 
765  if (status & TxQ0Int)
766  sis190_tx_interrupt(dev, tp, ioaddr);
767 out:
768  return IRQ_RETVAL(handled);
769 }
770 
771 #ifdef CONFIG_NET_POLL_CONTROLLER
772 static void sis190_netpoll(struct net_device *dev)
773 {
774  struct sis190_private *tp = netdev_priv(dev);
775  const int irq = tp->pci_dev->irq;
776 
777  disable_irq(irq);
778  sis190_irq(irq, dev);
779  enable_irq(irq);
780 }
781 #endif
782 
783 static void sis190_free_rx_skb(struct sis190_private *tp,
784  struct sk_buff **sk_buff, struct RxDesc *desc)
785 {
786  struct pci_dev *pdev = tp->pci_dev;
787 
788  pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
789  PCI_DMA_FROMDEVICE);
790  dev_kfree_skb(*sk_buff);
791  *sk_buff = NULL;
792  sis190_make_unusable_by_asic(desc);
793 }
794 
795 static void sis190_rx_clear(struct sis190_private *tp)
796 {
797  unsigned int i;
798 
799  for (i = 0; i < NUM_RX_DESC; i++) {
800  if (!tp->Rx_skbuff[i])
801  continue;
802  sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
803  }
804 }
805 
806 static void sis190_init_ring_indexes(struct sis190_private *tp)
807 {
808  tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
809 }
810 
811 static int sis190_init_ring(struct net_device *dev)
812 {
813  struct sis190_private *tp = netdev_priv(dev);
814 
815  sis190_init_ring_indexes(tp);
816 
817  memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
818  memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
819 
820  if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
821  goto err_rx_clear;
822 
823  sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
824 
825  return 0;
826 
827 err_rx_clear:
828  sis190_rx_clear(tp);
829  return -ENOMEM;
830 }
831 
832 static void sis190_set_rx_mode(struct net_device *dev)
833 {
834  struct sis190_private *tp = netdev_priv(dev);
835  void __iomem *ioaddr = tp->mmio_addr;
836  unsigned long flags;
837  u32 mc_filter[2]; /* Multicast hash filter */
838  u16 rx_mode;
839 
840  if (dev->flags & IFF_PROMISC) {
841  rx_mode =
842  AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
843  AcceptAllPhys;
844  mc_filter[1] = mc_filter[0] = 0xffffffff;
845  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
846  (dev->flags & IFF_ALLMULTI)) {
847  /* Too many to filter perfectly -- accept all multicasts. */
848  rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
849  mc_filter[1] = mc_filter[0] = 0xffffffff;
850  } else {
851  struct netdev_hw_addr *ha;
852 
853  rx_mode = AcceptBroadcast | AcceptMyPhys;
854  mc_filter[1] = mc_filter[0] = 0;
855  netdev_for_each_mc_addr(ha, dev) {
856  int bit_nr =
857  ether_crc(ETH_ALEN, ha->addr) & 0x3f;
858  mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
859  rx_mode |= AcceptMulticast;
860  }
861  }
862 
863  spin_lock_irqsave(&tp->lock, flags);
864 
865  SIS_W16(RxMacControl, rx_mode | 0x2);
866  SIS_W32(RxHashTable, mc_filter[0]);
867  SIS_W32(RxHashTable + 4, mc_filter[1]);
868 
869  spin_unlock_irqrestore(&tp->lock, flags);
870 }
871 
872 static void sis190_soft_reset(void __iomem *ioaddr)
873 {
874  SIS_W32(IntrControl, 0x8000);
875  SIS_PCI_COMMIT();
876  SIS_W32(IntrControl, 0x0);
877  sis190_asic_down(ioaddr);
878 }
879 
880 static void sis190_hw_start(struct net_device *dev)
881 {
882  struct sis190_private *tp = netdev_priv(dev);
883  void __iomem *ioaddr = tp->mmio_addr;
884 
885  sis190_soft_reset(ioaddr);
886 
887  SIS_W32(TxDescStartAddr, tp->tx_dma);
888  SIS_W32(RxDescStartAddr, tp->rx_dma);
889 
890  SIS_W32(IntrStatus, 0xffffffff);
891  SIS_W32(IntrMask, 0x0);
892  SIS_W32(GMIIControl, 0x0);
893  SIS_W32(TxMacControl, 0x60);
894  SIS_W16(RxMacControl, 0x02);
895  SIS_W32(RxHashTable, 0x0);
896  SIS_W32(0x6c, 0x0);
897  SIS_W32(RxWolCtrl, 0x0);
898  SIS_W32(RxWolData, 0x0);
899 
900  SIS_PCI_COMMIT();
901 
902  sis190_set_rx_mode(dev);
903 
904  /* Enable all known interrupts by setting the interrupt mask. */
905  SIS_W32(IntrMask, sis190_intr_mask);
906 
907  SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
908  SIS_W32(RxControl, 0x1a1d);
909 
910  netif_start_queue(dev);
911 }
912 
913 static void sis190_phy_task(struct work_struct *work)
914 {
915  struct sis190_private *tp =
916  container_of(work, struct sis190_private, phy_task);
917  struct net_device *dev = tp->dev;
918  void __iomem *ioaddr = tp->mmio_addr;
919  int phy_id = tp->mii_if.phy_id;
920  u16 val;
921 
922  rtnl_lock();
923 
924  if (!netif_running(dev))
925  goto out_unlock;
926 
927  val = mdio_read(ioaddr, phy_id, MII_BMCR);
928  if (val & BMCR_RESET) {
929  // FIXME: needlessly high ? -- FR 02/07/2005
930  mod_timer(&tp->timer, jiffies + HZ/10);
931  goto out_unlock;
932  }
933 
934  val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
935  if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
936  netif_carrier_off(dev);
937  netif_warn(tp, link, dev, "auto-negotiating...\n");
938  tp->link_status = LNK_AUTONEG;
939  } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
940  /* Rejoice ! */
941  struct {
942  int val;
943  u32 ctl;
944  const char *msg;
945  } reg31[] = {
946  { LPA_1000FULL, 0x07000c00 | 0x00001000,
947  "1000 Mbps Full Duplex" },
948  { LPA_1000HALF, 0x07000c00,
949  "1000 Mbps Half Duplex" },
950  { LPA_100FULL, 0x04000800 | 0x00001000,
951  "100 Mbps Full Duplex" },
952  { LPA_100HALF, 0x04000800,
953  "100 Mbps Half Duplex" },
954  { LPA_10FULL, 0x04000400 | 0x00001000,
955  "10 Mbps Full Duplex" },
956  { LPA_10HALF, 0x04000400,
957  "10 Mbps Half Duplex" },
958  { 0, 0x04000400, "unknown" }
959  }, *p = NULL;
960  u16 adv, autoexp, gigadv, gigrec;
961 
962  val = mdio_read(ioaddr, phy_id, 0x1f);
963  netif_info(tp, link, dev, "mii ext = %04x\n", val);
964 
965  val = mdio_read(ioaddr, phy_id, MII_LPA);
966  adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
967  autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
968  netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
969  val, adv, autoexp);
970 
971  if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
972  /* check for gigabit speed */
973  gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000);
974  gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000);
975  val = (gigadv & (gigrec >> 2));
976  if (val & ADVERTISE_1000FULL)
977  p = reg31;
978  else if (val & ADVERTISE_1000HALF)
979  p = reg31 + 1;
980  }
981  if (!p) {
982  val &= adv;
983 
984  for (p = reg31; p->val; p++) {
985  if ((val & p->val) == p->val)
986  break;
987  }
988  }
989 
990  p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;
991 
992  if ((tp->features & F_HAS_RGMII) &&
993  (tp->features & F_PHY_BCM5461)) {
994  // Set Tx Delay in RGMII mode.
995  mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
996  udelay(200);
997  mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
998  p->ctl |= 0x03000000;
999  }
1000 
1001  SIS_W32(StationControl, p->ctl);
1002 
1003  if (tp->features & F_HAS_RGMII) {
1004  SIS_W32(RGDelay, 0x0441);
1005  SIS_W32(RGDelay, 0x0440);
1006  }
1007 
1008  tp->negotiated_lpa = p->val;
1009 
1010  netif_info(tp, link, dev, "link on %s mode\n", p->msg);
1011  netif_carrier_on(dev);
1012  tp->link_status = LNK_ON;
1013  } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
1014  tp->link_status = LNK_OFF;
1015  mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
1016 
1017 out_unlock:
1018  rtnl_unlock();
1019 }
1020 
1021 static void sis190_phy_timer(unsigned long __opaque)
1022 {
1023  struct net_device *dev = (struct net_device *)__opaque;
1024  struct sis190_private *tp = netdev_priv(dev);
1025 
1026  if (likely(netif_running(dev)))
1027  schedule_work(&tp->phy_task);
1028 }
1029 
1030 static inline void sis190_delete_timer(struct net_device *dev)
1031 {
1032  struct sis190_private *tp = netdev_priv(dev);
1033 
1034  del_timer_sync(&tp->timer);
1035 }
1036 
1037 static inline void sis190_request_timer(struct net_device *dev)
1038 {
1039  struct sis190_private *tp = netdev_priv(dev);
1040  struct timer_list *timer = &tp->timer;
1041 
1042  init_timer(timer);
1043  timer->expires = jiffies + SIS190_PHY_TIMEOUT;
1044  timer->data = (unsigned long)dev;
1045  timer->function = sis190_phy_timer;
1046  add_timer(timer);
1047 }
1048 
1049 static void sis190_set_rxbufsize(struct sis190_private *tp,
1050  struct net_device *dev)
1051 {
1052  unsigned int mtu = dev->mtu;
1053 
1054  tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
1055  /* RxDesc->size has a licence to kill the lower bits */
1056  if (tp->rx_buf_sz & 0x07) {
1057  tp->rx_buf_sz += 8;
1058  tp->rx_buf_sz &= RX_BUF_MASK;
1059  }
1060 }
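/*
 * The rounding above keeps rx_buf_sz a multiple of 8: sis190_give_to_asic()
 * masks the size with RX_BUF_MASK (0xfff8), so the low three bits would be
 * discarded anyway.
 */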
1061 
1062 static int sis190_open(struct net_device *dev)
1063 {
1064  struct sis190_private *tp = netdev_priv(dev);
1065  struct pci_dev *pdev = tp->pci_dev;
1066  int rc = -ENOMEM;
1067 
1068  sis190_set_rxbufsize(tp, dev);
1069 
1070  /*
1071  * Rx and Tx descriptors need 256 bytes alignment.
1072  * pci_alloc_consistent() guarantees a stronger alignment.
1073  */
1074  tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
1075  if (!tp->TxDescRing)
1076  goto out;
1077 
1078  tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
1079  if (!tp->RxDescRing)
1080  goto err_free_tx_0;
1081 
1082  rc = sis190_init_ring(dev);
1083  if (rc < 0)
1084  goto err_free_rx_1;
1085 
1086  sis190_request_timer(dev);
1087 
1088  rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1089  if (rc < 0)
1090  goto err_release_timer_2;
1091 
1092  sis190_hw_start(dev);
1093 out:
1094  return rc;
1095 
1096 err_release_timer_2:
1097  sis190_delete_timer(dev);
1098  sis190_rx_clear(tp);
1099 err_free_rx_1:
1100  pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1101 err_free_tx_0:
1102  pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1103  goto out;
1104 }
1105 
1106 static void sis190_tx_clear(struct sis190_private *tp)
1107 {
1108  unsigned int i;
1109 
1110  for (i = 0; i < NUM_TX_DESC; i++) {
1111  struct sk_buff *skb = tp->Tx_skbuff[i];
1112 
1113  if (!skb)
1114  continue;
1115 
1116  sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
1117  tp->Tx_skbuff[i] = NULL;
1118  dev_kfree_skb(skb);
1119 
1120  tp->dev->stats.tx_dropped++;
1121  }
1122  tp->cur_tx = tp->dirty_tx = 0;
1123 }
1124 
1125 static void sis190_down(struct net_device *dev)
1126 {
1127  struct sis190_private *tp = netdev_priv(dev);
1128  void __iomem *ioaddr = tp->mmio_addr;
1129  unsigned int poll_locked = 0;
1130 
1131  sis190_delete_timer(dev);
1132 
1133  netif_stop_queue(dev);
1134 
1135  do {
1136  spin_lock_irq(&tp->lock);
1137 
1138  sis190_asic_down(ioaddr);
1139 
1140  spin_unlock_irq(&tp->lock);
1141 
1142  synchronize_irq(tp->pci_dev->irq);
1143 
1144  if (!poll_locked)
1145  poll_locked++;
1146 
1146 
1147  synchronize_sched();
1148 
1149  } while (SIS_R32(IntrMask));
1150 
1151  sis190_tx_clear(tp);
1152  sis190_rx_clear(tp);
1153 }
1154 
1155 static int sis190_close(struct net_device *dev)
1156 {
1157  struct sis190_private *tp = netdev_priv(dev);
1158  struct pci_dev *pdev = tp->pci_dev;
1159 
1160  sis190_down(dev);
1161 
1162  free_irq(pdev->irq, dev);
1163 
1164  pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1165  pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1166 
1167  tp->TxDescRing = NULL;
1168  tp->RxDescRing = NULL;
1169 
1170  return 0;
1171 }
1172 
1173 static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
1174  struct net_device *dev)
1175 {
1176  struct sis190_private *tp = netdev_priv(dev);
1177  void __iomem *ioaddr = tp->mmio_addr;
1178  u32 len, entry, dirty_tx;
1179  struct TxDesc *desc;
1180  dma_addr_t mapping;
1181 
1182  if (unlikely(skb->len < ETH_ZLEN)) {
1183  if (skb_padto(skb, ETH_ZLEN)) {
1184  dev->stats.tx_dropped++;
1185  goto out;
1186  }
1187  len = ETH_ZLEN;
1188  } else {
1189  len = skb->len;
1190  }
1191 
1192  entry = tp->cur_tx % NUM_TX_DESC;
1193  desc = tp->TxDescRing + entry;
1194 
1195  if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
1196  netif_stop_queue(dev);
1197  netif_err(tp, tx_err, dev,
1198  "BUG! Tx Ring full when queue awake!\n");
1199  return NETDEV_TX_BUSY;
1200  }
1201 
1202  mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1203  if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
1204  netif_err(tp, tx_err, dev,
1205  "PCI mapping failed, dropping packet");
1206  return NETDEV_TX_BUSY;
1207  }
1208 
1209  tp->Tx_skbuff[entry] = skb;
1210 
1211  desc->PSize = cpu_to_le32(len);
1212  desc->addr = cpu_to_le32(mapping);
1213 
1214  desc->size = cpu_to_le32(len);
1215  if (entry == (NUM_TX_DESC - 1))
1216  desc->size |= cpu_to_le32(RingEnd);
1217 
1218  wmb();
1219 
1220  desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
1221  if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
1222  /* Half Duplex */
1223  desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
1224  if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL)) /* gigabit? */
1225  desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
1226  }
1227 
1228  tp->cur_tx++;
1229 
1230  smp_wmb();
1231 
1232  SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
1233 
1234  dirty_tx = tp->dirty_tx;
1235  if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
1236  netif_stop_queue(dev);
1237  smp_rmb();
1238  if (dirty_tx != tp->dirty_tx)
1239  netif_wake_queue(dev);
1240  }
1241 out:
1242  return NETDEV_TX_OK;
1243 }
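/*
 * Ring accounting in sis190_start_xmit(): the queue is stopped once
 * cur_tx - dirty_tx reaches NUM_TX_DESC; the smp_rmb() and re-check of
 * dirty_tx close the race with a concurrent sis190_tx_interrupt(), which
 * advances dirty_tx and wakes the queue.
 */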
1244 
1245 static void sis190_free_phy(struct list_head *first_phy)
1246 {
1247  struct sis190_phy *cur, *next;
1248 
1249  list_for_each_entry_safe(cur, next, first_phy, list) {
1250  kfree(cur);
1251  }
1252 }
1253 
1262 static u16 sis190_default_phy(struct net_device *dev)
1263 {
1264  struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
1265  struct sis190_private *tp = netdev_priv(dev);
1266  struct mii_if_info *mii_if = &tp->mii_if;
1267  void __iomem *ioaddr = tp->mmio_addr;
1268  u16 status;
1269 
1270  phy_home = phy_default = phy_lan = NULL;
1271 
1272  list_for_each_entry(phy, &tp->first_phy, list) {
1273  status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
1274 
1275  // Link ON & Not select default PHY & not ghost PHY.
1276  if ((status & BMSR_LSTATUS) &&
1277  !phy_default &&
1278  (phy->type != UNKNOWN)) {
1279  phy_default = phy;
1280  } else {
1281  status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
1282  mdio_write(ioaddr, phy->phy_id, MII_BMCR,
1283  status | BMCR_ANENABLE | BMCR_ISOLATE);
1284  if (phy->type == HOME)
1285  phy_home = phy;
1286  else if (phy->type == LAN)
1287  phy_lan = phy;
1288  }
1289  }
1290 
1291  if (!phy_default) {
1292  if (phy_home)
1293  phy_default = phy_home;
1294  else if (phy_lan)
1295  phy_default = phy_lan;
1296  else
1297  phy_default = list_first_entry(&tp->first_phy,
1298  struct sis190_phy, list);
1299  }
1300 
1301  if (mii_if->phy_id != phy_default->phy_id) {
1302  mii_if->phy_id = phy_default->phy_id;
1303  if (netif_msg_probe(tp))
1304  pr_info("%s: Using transceiver at address %d as default\n",
1305  pci_name(tp->pci_dev), mii_if->phy_id);
1306  }
1307 
1308  status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
1309  status &= (~BMCR_ISOLATE);
1310 
1311  mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
1312  status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
1313 
1314  return status;
1315 }
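/*
 * Default PHY selection above: prefer the first PHY reporting link, then a
 * HOME type PHY, then LAN, and fall back to the first PHY found; the other
 * PHYs are parked with BMCR_ISOLATE, and BMCR_ISOLATE is cleared on the
 * chosen one before its BMSR is returned.
 */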
1316 
1317 static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
1318  struct sis190_phy *phy, unsigned int phy_id,
1319  u16 mii_status)
1320 {
1321  void __iomem *ioaddr = tp->mmio_addr;
1322  struct mii_chip_info *p;
1323 
1324  INIT_LIST_HEAD(&phy->list);
1325  phy->status = mii_status;
1326  phy->phy_id = phy_id;
1327 
1328  phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
1329  phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
1330 
1331  for (p = mii_chip_table; p->type; p++) {
1332  if ((p->id[0] == phy->id[0]) &&
1333  (p->id[1] == (phy->id[1] & 0xfff0))) {
1334  break;
1335  }
1336  }
1337 
1338  if (p->id[1]) {
1339  phy->type = (p->type == MIX) ?
1340  ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
1341  LAN : HOME) : p->type;
1342  tp->features |= p->feature;
1343  if (netif_msg_probe(tp))
1344  pr_info("%s: %s transceiver at address %d\n",
1345  pci_name(tp->pci_dev), p->name, phy_id);
1346  } else {
1347  phy->type = UNKNOWN;
1348  if (netif_msg_probe(tp))
1349  pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
1350  pci_name(tp->pci_dev),
1351  phy->id[0], (phy->id[1] & 0xfff0), phy_id);
1352  }
1353 }
1354 
1355 static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1356 {
1357  if (tp->features & F_PHY_88E1111) {
1358  void __iomem *ioaddr = tp->mmio_addr;
1359  int phy_id = tp->mii_if.phy_id;
1360  u16 reg[2][2] = {
1361  { 0x808b, 0x0ce1 },
1362  { 0x808f, 0x0c60 }
1363  }, *p;
1364 
1365  p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];
1366 
1367  mdio_write(ioaddr, phy_id, 0x1b, p[0]);
1368  udelay(200);
1369  mdio_write(ioaddr, phy_id, 0x14, p[1]);
1370  udelay(200);
1371  }
1372 }
1373 
1382 static int __devinit sis190_mii_probe(struct net_device *dev)
1383 {
1384  struct sis190_private *tp = netdev_priv(dev);
1385  struct mii_if_info *mii_if = &tp->mii_if;
1386  void __iomem *ioaddr = tp->mmio_addr;
1387  int phy_id;
1388  int rc = 0;
1389 
1390  INIT_LIST_HEAD(&tp->first_phy);
1391 
1392  for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1393  struct sis190_phy *phy;
1394  u16 status;
1395 
1396  status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
1397 
1398  // Try next mii if the current one is not accessible.
1399  if (status == 0xffff || status == 0x0000)
1400  continue;
1401 
1402  phy = kmalloc(sizeof(*phy), GFP_KERNEL);
1403  if (!phy) {
1404  sis190_free_phy(&tp->first_phy);
1405  rc = -ENOMEM;
1406  goto out;
1407  }
1408 
1409  sis190_init_phy(dev, tp, phy, phy_id, status);
1410 
1411  list_add(&tp->first_phy, &phy->list);
1412  }
1413 
1414  if (list_empty(&tp->first_phy)) {
1415  if (netif_msg_probe(tp))
1416  pr_info("%s: No MII transceivers found!\n",
1417  pci_name(tp->pci_dev));
1418  rc = -EIO;
1419  goto out;
1420  }
1421 
1422  /* Select default PHY for mac */
1423  sis190_default_phy(dev);
1424 
1425  sis190_mii_probe_88e1111_fixup(tp);
1426 
1427  mii_if->dev = dev;
1428  mii_if->mdio_read = __mdio_read;
1429  mii_if->mdio_write = __mdio_write;
1430  mii_if->phy_id_mask = PHY_ID_ANY;
1431  mii_if->reg_num_mask = MII_REG_ANY;
1432 out:
1433  return rc;
1434 }
1435 
1436 static void sis190_mii_remove(struct net_device *dev)
1437 {
1438  struct sis190_private *tp = netdev_priv(dev);
1439 
1440  sis190_free_phy(&tp->first_phy);
1441 }
1442 
1443 static void sis190_release_board(struct pci_dev *pdev)
1444 {
1445  struct net_device *dev = pci_get_drvdata(pdev);
1446  struct sis190_private *tp = netdev_priv(dev);
1447 
1448  iounmap(tp->mmio_addr);
1449  pci_release_regions(pdev);
1450  pci_disable_device(pdev);
1451  free_netdev(dev);
1452 }
1453 
1454 static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
1455 {
1456  struct sis190_private *tp;
1457  struct net_device *dev;
1458  void __iomem *ioaddr;
1459  int rc;
1460 
1461  dev = alloc_etherdev(sizeof(*tp));
1462  if (!dev) {
1463  rc = -ENOMEM;
1464  goto err_out_0;
1465  }
1466 
1467  SET_NETDEV_DEV(dev, &pdev->dev);
1468 
1469  tp = netdev_priv(dev);
1470  tp->dev = dev;
1471  tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
1472 
1473  rc = pci_enable_device(pdev);
1474  if (rc < 0) {
1475  if (netif_msg_probe(tp))
1476  pr_err("%s: enable failure\n", pci_name(pdev));
1477  goto err_free_dev_1;
1478  }
1479 
1480  rc = -ENODEV;
1481 
1482  if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1483  if (netif_msg_probe(tp))
1484  pr_err("%s: region #0 is no MMIO resource\n",
1485  pci_name(pdev));
1486  goto err_pci_disable_2;
1487  }
1488  if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
1489  if (netif_msg_probe(tp))
1490  pr_err("%s: invalid PCI region size(s)\n",
1491  pci_name(pdev));
1492  goto err_pci_disable_2;
1493  }
1494 
1495  rc = pci_request_regions(pdev, DRV_NAME);
1496  if (rc < 0) {
1497  if (netif_msg_probe(tp))
1498  pr_err("%s: could not request regions\n",
1499  pci_name(pdev));
1500  goto err_pci_disable_2;
1501  }
1502 
1503  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1504  if (rc < 0) {
1505  if (netif_msg_probe(tp))
1506  pr_err("%s: DMA configuration failed\n",
1507  pci_name(pdev));
1508  goto err_free_res_3;
1509  }
1510 
1511  pci_set_master(pdev);
1512 
1513  ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
1514  if (!ioaddr) {
1515  if (netif_msg_probe(tp))
1516  pr_err("%s: cannot remap MMIO, aborting\n",
1517  pci_name(pdev));
1518  rc = -EIO;
1519  goto err_free_res_3;
1520  }
1521 
1522  tp->pci_dev = pdev;
1523  tp->mmio_addr = ioaddr;
1524  tp->link_status = LNK_OFF;
1525 
1526  sis190_irq_mask_and_ack(ioaddr);
1527 
1528  sis190_soft_reset(ioaddr);
1529 out:
1530  return dev;
1531 
1532 err_free_res_3:
1533  pci_release_regions(pdev);
1534 err_pci_disable_2:
1535  pci_disable_device(pdev);
1536 err_free_dev_1:
1537  free_netdev(dev);
1538 err_out_0:
1539  dev = ERR_PTR(rc);
1540  goto out;
1541 }
1542 
1543 static void sis190_tx_timeout(struct net_device *dev)
1544 {
1545  struct sis190_private *tp = netdev_priv(dev);
1546  void __iomem *ioaddr = tp->mmio_addr;
1547  u8 tmp8;
1548 
1549  /* Disable Tx, if not already */
1550  tmp8 = SIS_R8(TxControl);
1551  if (tmp8 & CmdTxEnb)
1552  SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
1553 
1554  netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
1555  SIS_R32(TxControl), SIS_R32(TxSts));
1556 
1557  /* Disable interrupts by clearing the interrupt mask. */
1558  SIS_W32(IntrMask, 0x0000);
1559 
1560  /* Stop a shared interrupt from scavenging while we are. */
1561  spin_lock_irq(&tp->lock);
1562  sis190_tx_clear(tp);
1563  spin_unlock_irq(&tp->lock);
1564 
1565  /* ...and finally, reset everything. */
1566  sis190_hw_start(dev);
1567 
1568  netif_wake_queue(dev);
1569 }
1570 
1571 static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1572 {
1573  tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1574 }
1575 
1576 static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1577  struct net_device *dev)
1578 {
1579  struct sis190_private *tp = netdev_priv(dev);
1580  void __iomem *ioaddr = tp->mmio_addr;
1581  u16 sig;
1582  int i;
1583 
1584  if (netif_msg_probe(tp))
1585  pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
1586 
1587  /* Check to see if there is a sane EEPROM */
1588  sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
1589 
1590  if ((sig == 0xffff) || (sig == 0x0000)) {
1591  if (netif_msg_probe(tp))
1592  pr_info("%s: Error EEPROM read %x\n",
1593  pci_name(pdev), sig);
1594  return -EIO;
1595  }
1596 
1597  /* Get MAC address from EEPROM */
1598  for (i = 0; i < ETH_ALEN / 2; i++) {
1599  u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
1600 
1601  ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
1602  }
1603 
1604  sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
1605 
1606  return 0;
1607 }
1608 
1618 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1619  struct net_device *dev)
1620 {
1621  static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
1622  struct sis190_private *tp = netdev_priv(dev);
1623  struct pci_dev *isa_bridge;
1624  u8 reg, tmp8;
1625  unsigned int i;
1626 
1627  if (netif_msg_probe(tp))
1628  pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
1629 
1630  for (i = 0; i < ARRAY_SIZE(ids); i++) {
1631  isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
1632  if (isa_bridge)
1633  break;
1634  }
1635 
1636  if (!isa_bridge) {
1637  if (netif_msg_probe(tp))
1638  pr_info("%s: Can not find ISA bridge\n",
1639  pci_name(pdev));
1640  return -EIO;
1641  }
1642 
1643  /* Enable port 78h & 79h to access APC Registers. */
1644  pci_read_config_byte(isa_bridge, 0x48, &tmp8);
1645  reg = (tmp8 & ~0x02);
1646  pci_write_config_byte(isa_bridge, 0x48, reg);
1647  udelay(50);
1648  pci_read_config_byte(isa_bridge, 0x48, &reg);
1649 
1650  for (i = 0; i < ETH_ALEN; i++) {
1651  outb(0x9 + i, 0x78);
1652  dev->dev_addr[i] = inb(0x79);
1653  }
1654 
1655  outb(0x12, 0x78);
1656  reg = inb(0x79);
1657 
1658  sis190_set_rgmii(tp, reg);
1659 
1660  /* Restore the value to ISA Bridge */
1661  pci_write_config_byte(isa_bridge, 0x48, tmp8);
1662  pci_dev_put(isa_bridge);
1663 
1664  return 0;
1665 }
1666 
1674 static inline void sis190_init_rxfilter(struct net_device *dev)
1675 {
1676  struct sis190_private *tp = netdev_priv(dev);
1677  void __iomem *ioaddr = tp->mmio_addr;
1678  u16 ctl;
1679  int i;
1680 
1681  ctl = SIS_R16(RxMacControl);
1682  /*
1683  * Disable packet filtering before setting filter.
1684  * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
1685  * only and followed by RxMacAddr (6 bytes). Strange. -- FR
1686  */
1687  SIS_W16(RxMacControl, ctl & ~0x0f00);
1688 
1689  for (i = 0; i < ETH_ALEN; i++)
1690  SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
1691 
1692  SIS_W16(RxMacControl, ctl);
1693  SIS_PCI_COMMIT();
1694 }
1695 
1696 static int __devinit sis190_get_mac_addr(struct pci_dev *pdev,
1697  struct net_device *dev)
1698 {
1699  int rc;
1700 
1701  rc = sis190_get_mac_addr_from_eeprom(pdev, dev);
1702  if (rc < 0) {
1703  u8 reg;
1704 
1705  pci_read_config_byte(pdev, 0x73, &reg);
1706 
1707  if (reg & 0x00000001)
1708  rc = sis190_get_mac_addr_from_apc(pdev, dev);
1709  }
1710  return rc;
1711 }
1712 
1713 static void sis190_set_speed_auto(struct net_device *dev)
1714 {
1715  struct sis190_private *tp = netdev_priv(dev);
1716  void __iomem *ioaddr = tp->mmio_addr;
1717  int phy_id = tp->mii_if.phy_id;
1718  int val;
1719 
1720  netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
1721 
1722  val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
1723 
1724  // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
1725  // unchanged.
1726  mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
1727  ADVERTISE_100FULL | ADVERTISE_10FULL |
1728  ADVERTISE_100HALF | ADVERTISE_10HALF);
1729 
1730  // Enable 1000 Full Mode.
1731  mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
1732 
1733  // Enable auto-negotiation and restart auto-negotiation.
1734  mdio_write(ioaddr, phy_id, MII_BMCR,
1735  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
1736 }
1737 
1738 static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1739 {
1740  struct sis190_private *tp = netdev_priv(dev);
1741 
1742  return mii_ethtool_gset(&tp->mii_if, cmd);
1743 }
1744 
1745 static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1746 {
1747  struct sis190_private *tp = netdev_priv(dev);
1748 
1749  return mii_ethtool_sset(&tp->mii_if, cmd);
1750 }
1751 
1752 static void sis190_get_drvinfo(struct net_device *dev,
1753  struct ethtool_drvinfo *info)
1754 {
1755  struct sis190_private *tp = netdev_priv(dev);
1756 
1757  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1758  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1759  strlcpy(info->bus_info, pci_name(tp->pci_dev),
1760  sizeof(info->bus_info));
1761 }
1762 
1763 static int sis190_get_regs_len(struct net_device *dev)
1764 {
1765  return SIS190_REGS_SIZE;
1766 }
1767 
1768 static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1769  void *p)
1770 {
1771  struct sis190_private *tp = netdev_priv(dev);
1772  unsigned long flags;
1773 
1774  if (regs->len > SIS190_REGS_SIZE)
1775  regs->len = SIS190_REGS_SIZE;
1776 
1777  spin_lock_irqsave(&tp->lock, flags);
1778  memcpy_fromio(p, tp->mmio_addr, regs->len);
1779  spin_unlock_irqrestore(&tp->lock, flags);
1780 }
1781 
1782 static int sis190_nway_reset(struct net_device *dev)
1783 {
1784  struct sis190_private *tp = netdev_priv(dev);
1785 
1786  return mii_nway_restart(&tp->mii_if);
1787 }
1788 
1789 static u32 sis190_get_msglevel(struct net_device *dev)
1790 {
1791  struct sis190_private *tp = netdev_priv(dev);
1792 
1793  return tp->msg_enable;
1794 }
1795 
1796 static void sis190_set_msglevel(struct net_device *dev, u32 value)
1797 {
1798  struct sis190_private *tp = netdev_priv(dev);
1799 
1800  tp->msg_enable = value;
1801 }
1802 
1803 static const struct ethtool_ops sis190_ethtool_ops = {
1804  .get_settings = sis190_get_settings,
1805  .set_settings = sis190_set_settings,
1806  .get_drvinfo = sis190_get_drvinfo,
1807  .get_regs_len = sis190_get_regs_len,
1808  .get_regs = sis190_get_regs,
1809  .get_link = ethtool_op_get_link,
1810  .get_msglevel = sis190_get_msglevel,
1811  .set_msglevel = sis190_set_msglevel,
1812  .nway_reset = sis190_nway_reset,
1813 };
1814 
1815 static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1816 {
1817  struct sis190_private *tp = netdev_priv(dev);
1818 
1819  return !netif_running(dev) ? -EINVAL :
1820  generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
1821 }
1822 
1823 static int sis190_mac_addr(struct net_device *dev, void *p)
1824 {
1825  int rc;
1826 
1827  rc = eth_mac_addr(dev, p);
1828  if (!rc)
1829  sis190_init_rxfilter(dev);
1830  return rc;
1831 }
1832 
1833 static const struct net_device_ops sis190_netdev_ops = {
1834  .ndo_open = sis190_open,
1835  .ndo_stop = sis190_close,
1836  .ndo_do_ioctl = sis190_ioctl,
1837  .ndo_start_xmit = sis190_start_xmit,
1838  .ndo_tx_timeout = sis190_tx_timeout,
1839  .ndo_set_rx_mode = sis190_set_rx_mode,
1840  .ndo_change_mtu = eth_change_mtu,
1841  .ndo_set_mac_address = sis190_mac_addr,
1842  .ndo_validate_addr = eth_validate_addr,
1843 #ifdef CONFIG_NET_POLL_CONTROLLER
1844  .ndo_poll_controller = sis190_netpoll,
1845 #endif
1846 };
1847 
1848 static int __devinit sis190_init_one(struct pci_dev *pdev,
1849  const struct pci_device_id *ent)
1850 {
1851  static int printed_version = 0;
1852  struct sis190_private *tp;
1853  struct net_device *dev;
1854  void __iomem *ioaddr;
1855  int rc;
1856 
1857  if (!printed_version) {
1858  if (netif_msg_drv(&debug))
1859  pr_info(SIS190_DRIVER_NAME " loaded\n");
1860  printed_version = 1;
1861  }
1862 
1863  dev = sis190_init_board(pdev);
1864  if (IS_ERR(dev)) {
1865  rc = PTR_ERR(dev);
1866  goto out;
1867  }
1868 
1869  pci_set_drvdata(pdev, dev);
1870 
1871  tp = netdev_priv(dev);
1872  ioaddr = tp->mmio_addr;
1873 
1874  rc = sis190_get_mac_addr(pdev, dev);
1875  if (rc < 0)
1876  goto err_release_board;
1877 
1878  sis190_init_rxfilter(dev);
1879 
1880  INIT_WORK(&tp->phy_task, sis190_phy_task);
1881 
1882  dev->netdev_ops = &sis190_netdev_ops;
1883 
1884  SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1885  dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1886 
1887  spin_lock_init(&tp->lock);
1888 
1889  rc = sis190_mii_probe(dev);
1890  if (rc < 0)
1891  goto err_release_board;
1892 
1893  rc = register_netdev(dev);
1894  if (rc < 0)
1895  goto err_remove_mii;
1896 
1897  if (netif_msg_probe(tp)) {
1898  netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1899  pci_name(pdev),
1900  sis_chip_info[ent->driver_data].name,
1901  ioaddr, pdev->irq, dev->dev_addr);
1902  netdev_info(dev, "%s mode.\n",
1903  (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1904  }
1905 
1906  netif_carrier_off(dev);
1907 
1908  sis190_set_speed_auto(dev);
1909 out:
1910  return rc;
1911 
1912 err_remove_mii:
1913  sis190_mii_remove(dev);
1914 err_release_board:
1915  sis190_release_board(pdev);
1916  goto out;
1917 }
1918 
1919 static void __devexit sis190_remove_one(struct pci_dev *pdev)
1920 {
1921  struct net_device *dev = pci_get_drvdata(pdev);
1922  struct sis190_private *tp = netdev_priv(dev);
1923 
1924  sis190_mii_remove(dev);
1925  cancel_work_sync(&tp->phy_task);
1926  unregister_netdev(dev);
1927  sis190_release_board(pdev);
1928  pci_set_drvdata(pdev, NULL);
1929 }
1930 
1931 static struct pci_driver sis190_pci_driver = {
1932  .name = DRV_NAME,
1933  .id_table = sis190_pci_tbl,
1934  .probe = sis190_init_one,
1935  .remove = __devexit_p(sis190_remove_one),
1936 };
1937 
1938 static int __init sis190_init_module(void)
1939 {
1940  return pci_register_driver(&sis190_pci_driver);
1941 }
1942 
1943 static void __exit sis190_cleanup_module(void)
1944 {
1945  pci_unregister_driver(&sis190_pci_driver);
1946 }
1947 
1948 module_init(sis190_init_module);
1949 module_exit(sis190_cleanup_module);