Linux Kernel 3.7.1
dl2k.c
1 /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
2 /*
3  Copyright (c) 2001, 2002 by D-Link Corporation
4  Written by Edward Peng.<[email protected]>
5  Created 03-May-2001, based on Linux' sundance.c.
6 
7  This program is free software; you can redistribute it and/or modify
8  it under the terms of the GNU General Public License as published by
9  the Free Software Foundation; either version 2 of the License, or
10  (at your option) any later version.
11 */
12 
13 #define DRV_NAME "DL2000/TC902x-based linux driver"
14 #define DRV_VERSION "v1.19"
15 #define DRV_RELDATE "2007/08/12"
16 #include "dl2k.h"
17 #include <linux/dma-mapping.h>
18 
19 #define dw32(reg, val) iowrite32(val, ioaddr + (reg))
20 #define dw16(reg, val) iowrite16(val, ioaddr + (reg))
21 #define dw8(reg, val) iowrite8(val, ioaddr + (reg))
22 #define dr32(reg) ioread32(ioaddr + (reg))
23 #define dr16(reg) ioread16(ioaddr + (reg))
24 #define dr8(reg) ioread8(ioaddr + (reg))
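/* Register access shorthands: each helper expects a local
   "void __iomem *ioaddr" in scope and issues an ioread/iowrite of the
   given width at that register offset, so a read-modify-write is simply
   dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230). */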
25 
26 static char version[] __devinitdata =
27  KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
28 #define MAX_UNITS 8
29 static int mtu[MAX_UNITS];
30 static int vlan[MAX_UNITS];
31 static int jumbo[MAX_UNITS];
32 static char *media[MAX_UNITS];
33 static int tx_flow=-1;
34 static int rx_flow=-1;
35 static int copy_thresh;
36 static int rx_coalesce=10; /* Rx frame count each interrupt */
37 static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
38 static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
39 
40 
41 MODULE_AUTHOR ("Edward Peng");
42 MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
43 MODULE_LICENSE("GPL");
44 module_param_array(mtu, int, NULL, 0);
45 module_param_array(media, charp, NULL, 0);
46 module_param_array(vlan, int, NULL, 0);
47 module_param_array(jumbo, int, NULL, 0);
48 module_param(tx_flow, int, 0);
49 module_param(rx_flow, int, 0);
50 module_param(copy_thresh, int, 0);
51 module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
52 module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
53 module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
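/* The array parameters take one value per adapter, separated by commas.
   An illustrative load (values follow the parsing in rio_probe1) might be
   "insmod dl2k.ko media=auto,100mbps_fd jumbo=0,1 vlan=0,2"; see
   Documentation/networking/dl2k.txt for the full parameter list. */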
54 
55 
56 /* Enable the default interrupts */
57 #define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
58  UpdateStats | LinkEvent)
59 
60 static void dl2k_enable_int(struct netdev_private *np)
61 {
62  void __iomem *ioaddr = np->ioaddr;
63 
64  dw16(IntEnable, DEFAULT_INTR);
65 }
66 
67 static const int max_intrloop = 50;
68 static const int multicast_filter_limit = 0x40;
69 
70 static int rio_open (struct net_device *dev);
71 static void rio_timer (unsigned long data);
72 static void rio_tx_timeout (struct net_device *dev);
73 static void alloc_list (struct net_device *dev);
74 static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
75 static irqreturn_t rio_interrupt (int irq, void *dev_instance);
76 static void rio_free_tx (struct net_device *dev, int irq);
77 static void tx_error (struct net_device *dev, int tx_status);
78 static int receive_packet (struct net_device *dev);
79 static void rio_error (struct net_device *dev, int int_status);
80 static int change_mtu (struct net_device *dev, int new_mtu);
81 static void set_multicast (struct net_device *dev);
82 static struct net_device_stats *get_stats (struct net_device *dev);
83 static int clear_stats (struct net_device *dev);
84 static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
85 static int rio_close (struct net_device *dev);
86 static int find_miiphy (struct net_device *dev);
87 static int parse_eeprom (struct net_device *dev);
88 static int read_eeprom (struct netdev_private *, int eep_addr);
89 static int mii_wait_link (struct net_device *dev, int wait);
90 static int mii_set_media (struct net_device *dev);
91 static int mii_get_media (struct net_device *dev);
92 static int mii_set_media_pcs (struct net_device *dev);
93 static int mii_get_media_pcs (struct net_device *dev);
94 static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
95 static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
96  u16 data);
97 
98 static const struct ethtool_ops ethtool_ops;
99 
100 static const struct net_device_ops netdev_ops = {
101  .ndo_open = rio_open,
102  .ndo_start_xmit = start_xmit,
103  .ndo_stop = rio_close,
104  .ndo_get_stats = get_stats,
105  .ndo_validate_addr = eth_validate_addr,
106  .ndo_set_mac_address = eth_mac_addr,
107  .ndo_set_rx_mode = set_multicast,
108  .ndo_do_ioctl = rio_ioctl,
109  .ndo_tx_timeout = rio_tx_timeout,
110  .ndo_change_mtu = change_mtu,
111 };
112 
113 static int __devinit
114 rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
115 {
116  struct net_device *dev;
117  struct netdev_private *np;
118  static int card_idx;
119  int chip_idx = ent->driver_data;
120  int err, irq;
121  void __iomem *ioaddr;
122  static int version_printed;
123  void *ring_space;
124  dma_addr_t ring_dma;
125 
126  if (!version_printed++)
127  printk ("%s", version);
128 
129  err = pci_enable_device (pdev);
130  if (err)
131  return err;
132 
133  irq = pdev->irq;
134  err = pci_request_regions (pdev, "dl2k");
135  if (err)
136  goto err_out_disable;
137 
138  pci_set_master (pdev);
139 
140  err = -ENOMEM;
141 
142  dev = alloc_etherdev (sizeof (*np));
143  if (!dev)
144  goto err_out_res;
145  SET_NETDEV_DEV(dev, &pdev->dev);
146 
147  np = netdev_priv(dev);
148 
149  /* IO registers range. */
150  ioaddr = pci_iomap(pdev, 0, 0);
151  if (!ioaddr)
152  goto err_out_dev;
153  np->eeprom_addr = ioaddr;
154 
155 #ifdef MEM_MAPPING
156  /* MM registers range. */
157  ioaddr = pci_iomap(pdev, 1, 0);
158  if (!ioaddr)
159  goto err_out_iounmap;
160 #endif
161  np->ioaddr = ioaddr;
162  np->chip_id = chip_idx;
163  np->pdev = pdev;
164  spin_lock_init (&np->tx_lock);
165  spin_lock_init (&np->rx_lock);
166 
167  /* Parse manual configuration */
168  np->an_enable = 1;
169  np->tx_coalesce = 1;
170  if (card_idx < MAX_UNITS) {
171  if (media[card_idx] != NULL) {
172  np->an_enable = 0;
173  if (strcmp (media[card_idx], "auto") == 0 ||
174  strcmp (media[card_idx], "autosense") == 0 ||
175  strcmp (media[card_idx], "0") == 0 ) {
176  np->an_enable = 2;
177  } else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
178  strcmp (media[card_idx], "4") == 0) {
179  np->speed = 100;
180  np->full_duplex = 1;
181  } else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
182  strcmp (media[card_idx], "3") == 0) {
183  np->speed = 100;
184  np->full_duplex = 0;
185  } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
186  strcmp (media[card_idx], "2") == 0) {
187  np->speed = 10;
188  np->full_duplex = 1;
189  } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
190  strcmp (media[card_idx], "1") == 0) {
191  np->speed = 10;
192  np->full_duplex = 0;
193  } else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
194  strcmp (media[card_idx], "6") == 0) {
195  np->speed=1000;
196  np->full_duplex=1;
197  } else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
198  strcmp (media[card_idx], "5") == 0) {
199  np->speed = 1000;
200  np->full_duplex = 0;
201  } else {
202  np->an_enable = 1;
203  }
204  }
205  if (jumbo[card_idx] != 0) {
206  np->jumbo = 1;
207  dev->mtu = MAX_JUMBO;
208  } else {
209  np->jumbo = 0;
210  if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
211  dev->mtu = mtu[card_idx];
212  }
213  np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
214  vlan[card_idx] : 0;
215  if (rx_coalesce > 0 && rx_timeout > 0) {
216  np->rx_coalesce = rx_coalesce;
217  np->rx_timeout = rx_timeout;
218  np->coalesce = 1;
219  }
220  np->tx_flow = (tx_flow == 0) ? 0 : 1;
221  np->rx_flow = (rx_flow == 0) ? 0 : 1;
222 
223  if (tx_coalesce < 1)
224  tx_coalesce = 1;
225  else if (tx_coalesce > TX_RING_SIZE-1)
226  tx_coalesce = TX_RING_SIZE - 1;
227  }
228  dev->netdev_ops = &netdev_ops;
229  dev->watchdog_timeo = TX_TIMEOUT;
230  dev->ethtool_ops = &ethtool_ops;
231 #if 0
232  dev->features = NETIF_F_IP_CSUM;
233 #endif
234  pci_set_drvdata (pdev, dev);
235 
236  ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
237  if (!ring_space)
238  goto err_out_iounmap;
239  np->tx_ring = ring_space;
240  np->tx_ring_dma = ring_dma;
241 
242  ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
243  if (!ring_space)
244  goto err_out_unmap_tx;
245  np->rx_ring = ring_space;
246  np->rx_ring_dma = ring_dma;
247 
248  /* Parse eeprom data */
249  parse_eeprom (dev);
250 
251  /* Find PHY address */
252  err = find_miiphy (dev);
253  if (err)
254  goto err_out_unmap_rx;
255 
256  /* Fiber device? */
257  np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
258  np->link_status = 0;
259  /* Set media and reset PHY */
260  if (np->phy_media) {
261  /* default Auto-Negotiation for fiber devices */
262  if (np->an_enable == 2) {
263  np->an_enable = 1;
264  }
265  mii_set_media_pcs (dev);
266  } else {
267  /* Auto-Negotiation is mandatory for 1000BASE-T,
268  IEEE 802.3ab Annex 28D page 14 */
269  if (np->speed == 1000)
270  np->an_enable = 1;
271  mii_set_media (dev);
272  }
273 
274  err = register_netdev (dev);
275  if (err)
276  goto err_out_unmap_rx;
277 
278  card_idx++;
279 
280  printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
281  dev->name, np->name, dev->dev_addr, irq);
282  if (tx_coalesce > 1)
283  printk(KERN_INFO "tx_coalesce:\t%d packets\n",
284  tx_coalesce);
285  if (np->coalesce)
286  printk(KERN_INFO
287  "rx_coalesce:\t%d packets\n"
288  "rx_timeout: \t%d ns\n",
289  np->rx_coalesce, np->rx_timeout*640);
290  if (np->vlan)
291  printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
292  return 0;
293 
294 err_out_unmap_rx:
295  pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
296 err_out_unmap_tx:
297  pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
298 err_out_iounmap:
299 #ifdef MEM_MAPPING
300  pci_iounmap(pdev, np->ioaddr);
301 #endif
302  pci_iounmap(pdev, np->eeprom_addr);
303 err_out_dev:
304  free_netdev (dev);
305 err_out_res:
306  pci_release_regions (pdev);
307 err_out_disable:
308  pci_disable_device (pdev);
309  return err;
310 }
311 
312 static int
313 find_miiphy (struct net_device *dev)
314 {
315  struct netdev_private *np = netdev_priv(dev);
316  int i, phy_found = 0;
317  np = netdev_priv(dev);
318  np->phy_addr = 1;
319 
320  for (i = 31; i >= 0; i--) {
321  int mii_status = mii_read (dev, i, 1);
322  if (mii_status != 0xffff && mii_status != 0x0000) {
323  np->phy_addr = i;
324  phy_found++;
325  }
326  }
327  if (!phy_found) {
328  printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
329  return -ENODEV;
330  }
331  return 0;
332 }
333 
334 static int
335 parse_eeprom (struct net_device *dev)
336 {
337  struct netdev_private *np = netdev_priv(dev);
338  void __iomem *ioaddr = np->ioaddr;
339  int i, j;
340  u8 sromdata[256];
341  u8 *psib;
342  u32 crc;
343  PSROM_t psrom = (PSROM_t) sromdata;
344 
345  int cid, next;
346 
347  for (i = 0; i < 128; i++)
348  ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
349 
350  if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
351  /* Check CRC */
352  crc = ~ether_crc_le (256 - 4, sromdata);
353  if (psrom->crc != cpu_to_le32(crc)) {
354  printk (KERN_ERR "%s: EEPROM data CRC error.\n",
355  dev->name);
356  return -1;
357  }
358  }
359 
360  /* Set MAC address */
361  for (i = 0; i < 6; i++)
362  dev->dev_addr[i] = psrom->mac_addr[i];
363 
364  if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
365  return 0;
366  }
367 
368  /* Parse Software Information Block */
369  i = 0x30;
370  psib = (u8 *) sromdata;
371  do {
372  cid = psib[i++];
373  next = psib[i++];
374  if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
375  printk (KERN_ERR "Cell data error\n");
376  return -1;
377  }
378  switch (cid) {
379  case 0: /* Format version */
380  break;
381  case 1: /* End of cell */
382  return 0;
383  case 2: /* Duplex Polarity */
384  np->duplex_polarity = psib[i];
385  dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
386  break;
387  case 3: /* Wake Polarity */
388  np->wake_polarity = psib[i];
389  break;
390  case 9: /* Adapter description */
391  j = (next - i > 255) ? 255 : next - i;
392  memcpy (np->name, &(psib[i]), j);
393  break;
394  case 4:
395  case 5:
396  case 6:
397  case 7:
398  case 8: /* Reserved */
399  break;
400  default: /* Unknown cell */
401  return -1;
402  }
403  i = next;
404  } while (1);
405 
406  return 0;
407 }
408 
409 static int
410 rio_open (struct net_device *dev)
411 {
412  struct netdev_private *np = netdev_priv(dev);
413  void __iomem *ioaddr = np->ioaddr;
414  const int irq = np->pdev->irq;
415  int i;
416  u16 macctrl;
417 
418  i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
419  if (i)
420  return i;
421 
422  /* Reset all logic functions */
423  dw16(ASICCtrl + 2,
424  GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
425  mdelay(10);
426 
427  /* DebugCtrl bit 4, 5, 9 must set */
428  dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
429 
430  /* Jumbo frame */
431  if (np->jumbo != 0)
432  dw16(MaxFrameSize, MAX_JUMBO+14);
433 
434  alloc_list (dev);
435 
436  /* Get station address */
437  for (i = 0; i < 6; i++)
438  dw8(StationAddr0 + i, dev->dev_addr[i]);
439 
440  set_multicast (dev);
441  if (np->coalesce) {
442  dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
443  }
444  /* Set RIO to poll every N*320nsec. */
445  dw8(RxDMAPollPeriod, 0x20);
446  dw8(TxDMAPollPeriod, 0xff);
447  dw8(RxDMABurstThresh, 0x30);
448  dw8(RxDMAUrgentThresh, 0x30);
449  dw32(RmonStatMask, 0x0007ffff);
450  /* clear statistics */
451  clear_stats (dev);
452 
453  /* VLAN supported */
454  if (np->vlan) {
455  /* priority field in RxDMAIntCtrl */
456  dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
457  /* VLANId */
458  dw16(VLANId, np->vlan);
459  /* Length/Type should be 0x8100 */
460  dw32(VLANTag, 0x8100 << 16 | np->vlan);
461  /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
462  VLAN information tagged by TFC's VID, CFI fields. */
463  dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
464  }
465 
466  init_timer (&np->timer);
467  np->timer.expires = jiffies + 1*HZ;
468  np->timer.data = (unsigned long) dev;
469  np->timer.function = rio_timer;
470  add_timer (&np->timer);
471 
472  /* Start Tx/Rx */
473  dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
474 
475  macctrl = 0;
476  macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
477  macctrl |= (np->full_duplex) ? DuplexSelect : 0;
478  macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
479  macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
480  dw16(MACCtrl, macctrl);
481 
482  netif_start_queue (dev);
483 
484  dl2k_enable_int(np);
485  return 0;
486 }
487 
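/* One-second housekeeping timer: if the Rx ring was exhausted because
   earlier skbuff allocations failed, retry the allocations, re-arm the
   descriptors so reception can resume, and reschedule the timer. */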
488 static void
489 rio_timer (unsigned long data)
490 {
491  struct net_device *dev = (struct net_device *)data;
492  struct netdev_private *np = netdev_priv(dev);
493  unsigned int entry;
494  int next_tick = 1*HZ;
495  unsigned long flags;
496 
497  spin_lock_irqsave(&np->rx_lock, flags);
498  /* Recover rx ring exhausted error */
499  if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
500  printk(KERN_INFO "Try to recover rx ring exhausted...\n");
501  /* Re-allocate skbuffs to fill the descriptor ring */
502  for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
503  struct sk_buff *skb;
504  entry = np->old_rx % RX_RING_SIZE;
505  /* Dropped packets don't need to re-allocate */
506  if (np->rx_skbuff[entry] == NULL) {
507  skb = netdev_alloc_skb_ip_align(dev,
508  np->rx_buf_sz);
509  if (skb == NULL) {
510  np->rx_ring[entry].fraginfo = 0;
511  printk (KERN_INFO
512  "%s: Still unable to re-allocate Rx skbuff.#%d\n",
513  dev->name, entry);
514  break;
515  }
516  np->rx_skbuff[entry] = skb;
517  np->rx_ring[entry].fraginfo =
518  cpu_to_le64 (pci_map_single
519  (np->pdev, skb->data, np->rx_buf_sz,
520  PCI_DMA_FROMDEVICE));
521  }
522  np->rx_ring[entry].fraginfo |=
523  cpu_to_le64((u64)np->rx_buf_sz << 48);
524  np->rx_ring[entry].status = 0;
525  } /* end for */
526  } /* end if */
527  spin_unlock_irqrestore (&np->rx_lock, flags);
528  np->timer.expires = jiffies + next_tick;
529  add_timer(&np->timer);
530 }
531 
532 static void
533 rio_tx_timeout (struct net_device *dev)
534 {
535  struct netdev_private *np = netdev_priv(dev);
536  void __iomem *ioaddr = np->ioaddr;
537 
538  printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
539  dev->name, dr32(TxStatus));
540  rio_free_tx(dev, 0);
541  dev->if_port = 0;
542  dev->trans_start = jiffies; /* prevent tx timeout */
543 }
544 
545  /* allocate and initialize Tx and Rx descriptors */
546 static void
547 alloc_list (struct net_device *dev)
548 {
549  struct netdev_private *np = netdev_priv(dev);
550  void __iomem *ioaddr = np->ioaddr;
551  int i;
552 
553  np->cur_rx = np->cur_tx = 0;
554  np->old_rx = np->old_tx = 0;
555  np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
556 
557  /* Initialize Tx descriptors, TFDListPtr leaves in start_xmit(). */
558  for (i = 0; i < TX_RING_SIZE; i++) {
559  np->tx_skbuff[i] = NULL;
560  np->tx_ring[i].status = cpu_to_le64 (TFDDone);
561  np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
562  ((i+1)%TX_RING_SIZE) *
563  sizeof (struct netdev_desc));
564  }
565 
566  /* Initialize Rx descriptors */
567  for (i = 0; i < RX_RING_SIZE; i++) {
568  np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
569  ((i + 1) % RX_RING_SIZE) *
570  sizeof (struct netdev_desc));
571  np->rx_ring[i].status = 0;
572  np->rx_ring[i].fraginfo = 0;
573  np->rx_skbuff[i] = NULL;
574  }
575 
576  /* Allocate the rx buffers */
577  for (i = 0; i < RX_RING_SIZE; i++) {
578  /* Allocated fixed size of skbuff */
579  struct sk_buff *skb;
580 
581  skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
582  np->rx_skbuff[i] = skb;
583  if (skb == NULL) {
584  printk (KERN_ERR
585  "%s: alloc_list: allocate Rx buffer error! ",
586  dev->name);
587  break;
588  }
589  /* Rubicon now supports 40 bits of addressing space. */
590  np->rx_ring[i].fraginfo =
591  cpu_to_le64 ( pci_map_single (
592  np->pdev, skb->data, np->rx_buf_sz,
593  PCI_DMA_FROMDEVICE));
594  np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
595  }
596 
597  /* Set RFDListPtr */
598  dw32(RFDListPtr0, np->rx_ring_dma);
599  dw32(RFDListPtr1, 0);
600 }
601 
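/* Transmit path. Each frame is posted as a single-fragment TFD: the low
   48 bits of fraginfo hold the DMA address and the upper 16 bits the
   fragment length. While TFDListPtr0 still reads zero (first transmit
   after reset), it is loaded here to point the DMA engine at the ring. */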
602 static netdev_tx_t
603 start_xmit (struct sk_buff *skb, struct net_device *dev)
604 {
605  struct netdev_private *np = netdev_priv(dev);
606  void __iomem *ioaddr = np->ioaddr;
607  struct netdev_desc *txdesc;
608  unsigned entry;
609  u64 tfc_vlan_tag = 0;
610 
611  if (np->link_status == 0) { /* Link Down */
612  dev_kfree_skb(skb);
613  return NETDEV_TX_OK;
614  }
615  entry = np->cur_tx % TX_RING_SIZE;
616  np->tx_skbuff[entry] = skb;
617  txdesc = &np->tx_ring[entry];
618 
619 #if 0
620  if (skb->ip_summed == CHECKSUM_PARTIAL) {
621  txdesc->status |=
622  cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
623  IPChecksumEnable);
624  }
625 #endif
626  if (np->vlan) {
627  tfc_vlan_tag = VLANTagInsert |
628  ((u64)np->vlan << 32) |
629  ((u64)skb->priority << 45);
630  }
631  txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
632  skb->len,
633  PCI_DMA_TODEVICE));
634  txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
635 
636  /* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
637  * Work around: Always use 1 descriptor in 10Mbps mode */
638  if (entry % np->tx_coalesce == 0 || np->speed == 10)
639  txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
640  WordAlignDisable |
641  TxDMAIndicate |
642  (1 << FragCountShift));
643  else
644  txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
645  WordAlignDisable |
646  (1 << FragCountShift));
647 
648  /* TxDMAPollNow */
649  dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
650  /* Schedule ISR */
651  dw32(CountDown, 10000);
652  np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
653  if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
654  < TX_QUEUE_LEN - 1 && np->speed != 10) {
655  /* do nothing */
656  } else if (!netif_queue_stopped(dev)) {
657  netif_stop_queue (dev);
658  }
659 
660  /* The first TFDListPtr */
661  if (!dr32(TFDListPtr0)) {
662  dw32(TFDListPtr0, np->tx_ring_dma +
663  entry * sizeof (struct netdev_desc));
664  dw32(TFDListPtr1, 0);
665  }
666 
667  return NETDEV_TX_OK;
668 }
669 
670 static irqreturn_t
671 rio_interrupt (int irq, void *dev_instance)
672 {
673  struct net_device *dev = dev_instance;
674  struct netdev_private *np = netdev_priv(dev);
675  void __iomem *ioaddr = np->ioaddr;
676  unsigned int_status;
677  int cnt = max_intrloop;
678  int handled = 0;
679 
680  while (1) {
681  int_status = dr16(IntStatus);
682  dw16(IntStatus, int_status);
683  int_status &= DEFAULT_INTR;
684  if (int_status == 0 || --cnt < 0)
685  break;
686  handled = 1;
687  /* Processing received packets */
688  if (int_status & RxDMAComplete)
689  receive_packet (dev);
690  /* TxDMAComplete interrupt */
691  if ((int_status & (TxDMAComplete|IntRequested))) {
692  int tx_status;
693  tx_status = dr32(TxStatus);
694  if (tx_status & 0x01)
695  tx_error (dev, tx_status);
696  /* Free used tx skbuffs */
697  rio_free_tx (dev, 1);
698  }
699 
700  /* Handle uncommon events */
701  if (int_status &
702  (HostError | LinkEvent | UpdateStats))
703  rio_error (dev, int_status);
704  }
705  if (np->cur_tx != np->old_tx)
706  dw32(CountDown, 100);
707  return IRQ_RETVAL(handled);
708 }
709 
710 static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
711 {
712  return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
713 }
714 
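/* Reclaim completed Tx descriptors (TFDDone set): unmap the buffer, free
   the skbuff and wake the queue once enough ring entries are free again.
   The "irq" argument only selects spin_lock() vs. spin_lock_irqsave()
   for tx_lock, depending on the calling context. */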
715 static void
716 rio_free_tx (struct net_device *dev, int irq)
717 {
718  struct netdev_private *np = netdev_priv(dev);
719  int entry = np->old_tx % TX_RING_SIZE;
720  int tx_use = 0;
721  unsigned long flag = 0;
722 
723  if (irq)
724  spin_lock(&np->tx_lock);
725  else
726  spin_lock_irqsave(&np->tx_lock, flag);
727 
728  /* Free used tx skbuffs */
729  while (entry != np->cur_tx) {
730  struct sk_buff *skb;
731 
732  if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
733  break;
734  skb = np->tx_skbuff[entry];
735  pci_unmap_single (np->pdev,
736  desc_to_dma(&np->tx_ring[entry]),
737  skb->len, PCI_DMA_TODEVICE);
738  if (irq)
739  dev_kfree_skb_irq (skb);
740  else
741  dev_kfree_skb (skb);
742 
743  np->tx_skbuff[entry] = NULL;
744  entry = (entry + 1) % TX_RING_SIZE;
745  tx_use++;
746  }
747  if (irq)
748  spin_unlock(&np->tx_lock);
749  else
750  spin_unlock_irqrestore(&np->tx_lock, flag);
751  np->old_tx = entry;
752 
753  /* If the ring is no longer full, clear tx_full and
754  call netif_wake_queue() */
755 
756  if (netif_queue_stopped(dev) &&
757  ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
758  < TX_QUEUE_LEN - 1 || np->speed == 10)) {
759  netif_wake_queue (dev);
760  }
761 }
762 
763 static void
764 tx_error (struct net_device *dev, int tx_status)
765 {
766  struct netdev_private *np = netdev_priv(dev);
767  void __iomem *ioaddr = np->ioaddr;
768  int frame_id;
769  int i;
770 
771  frame_id = (tx_status & 0xffff0000);
772  printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
773  dev->name, tx_status, frame_id);
774  np->stats.tx_errors++;
775  /* Transmit Underrun */
776  if (tx_status & 0x10) {
777  np->stats.tx_fifo_errors++;
778  dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
779  /* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */
780  dw16(ASICCtrl + 2,
781  TxReset | DMAReset | FIFOReset | NetworkReset);
782  /* Wait for ResetBusy bit clear */
783  for (i = 50; i > 0; i--) {
784  if (!(dr16(ASICCtrl + 2) & ResetBusy))
785  break;
786  mdelay (1);
787  }
788  rio_free_tx (dev, 1);
789  /* Reset TFDListPtr */
790  dw32(TFDListPtr0, np->tx_ring_dma +
791  np->old_tx * sizeof (struct netdev_desc));
792  dw32(TFDListPtr1, 0);
793 
794  /* Let TxStartThresh stay default value */
795  }
796  /* Late Collision */
797  if (tx_status & 0x04) {
798  np->stats.tx_fifo_errors++;
799  /* TxReset and clear FIFO */
800  dw16(ASICCtrl + 2, TxReset | FIFOReset);
801  /* Wait reset done */
802  for (i = 50; i > 0; i--) {
803  if (!(dr16(ASICCtrl + 2) & ResetBusy))
804  break;
805  mdelay (1);
806  }
807  /* Let TxStartThresh stay default value */
808  }
809  /* Maximum Collisions */
810 #ifdef ETHER_STATS
811  if (tx_status & 0x08)
812  np->stats.collisions16++;
813 #else
814  if (tx_status & 0x08)
815  np->stats.collisions++;
816 #endif
817  /* Restart the Tx */
818  dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
819 }
820 
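/* Rx handler: walk the descriptors whose RFDDone, FrameStart and
   FrameEnd bits are all set. Frames no larger than copy_thresh are
   copied into a fresh skbuff (copybreak); larger ones are unmapped and
   passed up directly. The ring is then refilled under rx_lock. */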
821 static int
822 receive_packet (struct net_device *dev)
823 {
824  struct netdev_private *np = netdev_priv(dev);
825  int entry = np->cur_rx % RX_RING_SIZE;
826  int cnt = 30;
827 
828  /* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
829  while (1) {
830  struct netdev_desc *desc = &np->rx_ring[entry];
831  int pkt_len;
832  u64 frame_status;
833 
834  if (!(desc->status & cpu_to_le64(RFDDone)) ||
835  !(desc->status & cpu_to_le64(FrameStart)) ||
836  !(desc->status & cpu_to_le64(FrameEnd)))
837  break;
838 
839  /* Chip omits the CRC. */
840  frame_status = le64_to_cpu(desc->status);
841  pkt_len = frame_status & 0xffff;
842  if (--cnt < 0)
843  break;
844  /* Update rx error statistics, drop packet. */
845  if (frame_status & RFS_Errors) {
846  np->stats.rx_errors++;
847  if (frame_status & (RxRuntFrame | RxLengthError))
848  np->stats.rx_length_errors++;
849  if (frame_status & RxFCSError)
850  np->stats.rx_crc_errors++;
851  if (frame_status & RxAlignmentError && np->speed != 1000)
852  np->stats.rx_frame_errors++;
853  if (frame_status & RxFIFOOverrun)
854  np->stats.rx_fifo_errors++;
855  } else {
856  struct sk_buff *skb;
857 
858  /* Small skbuffs for short packets */
859  if (pkt_len > copy_thresh) {
860  pci_unmap_single (np->pdev,
861  desc_to_dma(desc),
862  np->rx_buf_sz,
863  PCI_DMA_FROMDEVICE);
864  skb_put (skb = np->rx_skbuff[entry], pkt_len);
865  np->rx_skbuff[entry] = NULL;
866  } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
867  pci_dma_sync_single_for_cpu(np->pdev,
868  desc_to_dma(desc),
869  np->rx_buf_sz,
870  PCI_DMA_FROMDEVICE);
871  skb_copy_to_linear_data (skb,
872  np->rx_skbuff[entry]->data,
873  pkt_len);
874  skb_put (skb, pkt_len);
875  pci_dma_sync_single_for_device(np->pdev,
876  desc_to_dma(desc),
877  np->rx_buf_sz,
878  PCI_DMA_FROMDEVICE);
879  }
880  skb->protocol = eth_type_trans (skb, dev);
881 #if 0
882  /* Checksum done by hw, but csum value unavailable. */
883  if (np->pdev->pci_rev_id >= 0x0c &&
884  !(frame_status & (TCPError | UDPError | IPError))) {
885  skb->ip_summed = CHECKSUM_UNNECESSARY;
886  }
887 #endif
888  netif_rx (skb);
889  }
890  entry = (entry + 1) % RX_RING_SIZE;
891  }
892  spin_lock(&np->rx_lock);
893  np->cur_rx = entry;
894  /* Re-allocate skbuffs to fill the descriptor ring */
895  entry = np->old_rx;
896  while (entry != np->cur_rx) {
897  struct sk_buff *skb;
898  /* Dropped packets don't need to re-allocate */
899  if (np->rx_skbuff[entry] == NULL) {
900  skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
901  if (skb == NULL) {
902  np->rx_ring[entry].fraginfo = 0;
903  printk (KERN_INFO
904  "%s: receive_packet: "
905  "Unable to re-allocate Rx skbuff.#%d\n",
906  dev->name, entry);
907  break;
908  }
909  np->rx_skbuff[entry] = skb;
910  np->rx_ring[entry].fraginfo =
911  cpu_to_le64 (pci_map_single
912  (np->pdev, skb->data, np->rx_buf_sz,
913  PCI_DMA_FROMDEVICE));
914  }
915  np->rx_ring[entry].fraginfo |=
916  cpu_to_le64((u64)np->rx_buf_sz << 48);
917  np->rx_ring[entry].status = 0;
918  entry = (entry + 1) % RX_RING_SIZE;
919  }
920  np->old_rx = entry;
921  spin_unlock(&np->rx_lock);
922  return 0;
923 }
924 
925 static void
926 rio_error (struct net_device *dev, int int_status)
927 {
928  struct netdev_private *np = netdev_priv(dev);
929  void __iomem *ioaddr = np->ioaddr;
930  u16 macctrl;
931 
932  /* Link change event */
933  if (int_status & LinkEvent) {
934  if (mii_wait_link (dev, 10) == 0) {
935  printk (KERN_INFO "%s: Link up\n", dev->name);
936  if (np->phy_media)
937  mii_get_media_pcs (dev);
938  else
939  mii_get_media (dev);
940  if (np->speed == 1000)
941  np->tx_coalesce = tx_coalesce;
942  else
943  np->tx_coalesce = 1;
944  macctrl = 0;
945  macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
946  macctrl |= (np->full_duplex) ? DuplexSelect : 0;
947  macctrl |= (np->tx_flow) ?
948  TxFlowControlEnable : 0;
949  macctrl |= (np->rx_flow) ?
950  RxFlowControlEnable : 0;
951  dw16(MACCtrl, macctrl);
952  np->link_status = 1;
953  netif_carrier_on(dev);
954  } else {
955  printk (KERN_INFO "%s: Link off\n", dev->name);
956  np->link_status = 0;
957  netif_carrier_off(dev);
958  }
959  }
960 
961  /* UpdateStats statistics registers */
962  if (int_status & UpdateStats) {
963  get_stats (dev);
964  }
965 
966  /* PCI Error, a catastrophic error related to the bus interface
967  occurs, set GlobalReset and HostReset to reset. */
968  if (int_status & HostError) {
969  printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
970  dev->name, int_status);
971  dw16(ASICCtrl + 2, GlobalReset | HostReset);
972  mdelay (500);
973  }
974 }
975 
976 static struct net_device_stats *
977 get_stats (struct net_device *dev)
978 {
979  struct netdev_private *np = netdev_priv(dev);
980  void __iomem *ioaddr = np->ioaddr;
981 #ifdef MEM_MAPPING
982  int i;
983 #endif
984  unsigned int stat_reg;
985 
986  /* All statistics registers need to be acknowledged,
987  else statistic overflow could cause problems */
988 
989  np->stats.rx_packets += dr32(FramesRcvOk);
990  np->stats.tx_packets += dr32(FramesXmtOk);
991  np->stats.rx_bytes += dr32(OctetRcvOk);
992  np->stats.tx_bytes += dr32(OctetXmtOk);
993 
994  np->stats.multicast = dr32(McstFramesRcvdOk);
995  np->stats.collisions += dr32(SingleColFrames)
996  + dr32(MultiColFrames);
997 
998  /* detailed tx errors */
999  stat_reg = dr16(FramesAbortXSColls);
1000  np->stats.tx_aborted_errors += stat_reg;
1001  np->stats.tx_errors += stat_reg;
1002 
1003  stat_reg = dr16(CarrierSenseErrors);
1004  np->stats.tx_carrier_errors += stat_reg;
1005  np->stats.tx_errors += stat_reg;
1006 
1007  /* Clear all other statistic register. */
1025 
1026 #ifdef MEM_MAPPING
1027  for (i = 0x100; i <= 0x150; i += 4)
1028  dr32(i);
1029 #endif
1035  return &np->stats;
1036 }
1037 
1038 static int
1039 clear_stats (struct net_device *dev)
1040 {
1041  struct netdev_private *np = netdev_priv(dev);
1042  void __iomem *ioaddr = np->ioaddr;
1043 #ifdef MEM_MAPPING
1044  int i;
1045 #endif
1046 
1047  /* All statistics registers need to be acknowledged,
1048  else statistic overflow could cause problems */
1049  dr32(FramesRcvOk);
1050  dr32(FramesXmtOk);
1051  dr32(OctetRcvOk);
1052  dr32(OctetXmtOk);
1053 
1058  /* detailed rx errors */
1063 
1064  /* detailed tx errors */
1067 
1068  /* Clear all other statistic register. */
1081 #ifdef MEM_MAPPING
1082  for (i = 0x100; i <= 0x150; i += 4)
1083  dr32(i);
1084 #endif
1090  return 0;
1091 }
1092 
1093 
1094 static int
1095 change_mtu (struct net_device *dev, int new_mtu)
1096 {
1097  struct netdev_private *np = netdev_priv(dev);
1098  int max = (np->jumbo) ? MAX_JUMBO : 1536;
1099 
1100  if ((new_mtu < 68) || (new_mtu > max)) {
1101  return -EINVAL;
1102  }
1103 
1104  dev->mtu = new_mtu;
1105 
1106  return 0;
1107 }
1108 
1109 static void
1110 set_multicast (struct net_device *dev)
1111 {
1112  struct netdev_private *np = netdev_priv(dev);
1113  void __iomem *ioaddr = np->ioaddr;
1114  u32 hash_table[2];
1115  u16 rx_mode = 0;
1116 
1117  hash_table[0] = hash_table[1] = 0;
1118  /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
1119  hash_table[1] |= 0x02000000;
1120  if (dev->flags & IFF_PROMISC) {
1121  /* Receive all frames promiscuously. */
1122  rx_mode = ReceiveAllFrames;
1123  } else if ((dev->flags & IFF_ALLMULTI) ||
1124  (netdev_mc_count(dev) > multicast_filter_limit)) {
1125  /* Receive broadcast and multicast frames */
1126  rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1127  } else if (!netdev_mc_empty(dev)) {
1128  struct netdev_hw_addr *ha;
1129  /* Receive broadcast frames and multicast frames filtering
1130  by Hashtable */
1131  rx_mode =
1132  ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1133  netdev_for_each_mc_addr(ha, dev) {
1134  int bit, index = 0;
1135  int crc = ether_crc_le(ETH_ALEN, ha->addr);
1136  /* The inverted high significant 6 bits of CRC are
1137  used as an index to hashtable */
1138  for (bit = 0; bit < 6; bit++)
1139  if (crc & (1 << (31 - bit)))
1140  index |= (1 << bit);
1141  hash_table[index / 32] |= (1 << (index % 32));
1142  }
1143  } else {
1144  rx_mode = ReceiveBroadcast | ReceiveUnicast;
1145  }
1146  if (np->vlan) {
1147  /* ReceiveVLANMatch field in ReceiveMode */
1148  rx_mode |= ReceiveVLANMatch;
1149  }
1150 
1151  dw32(HashTable0, hash_table[0]);
1152  dw32(HashTable1, hash_table[1]);
1153  dw16(ReceiveMode, rx_mode);
1154 }
1155 
1156 static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1157 {
1158  struct netdev_private *np = netdev_priv(dev);
1159  strcpy(info->driver, "dl2k");
1160  strcpy(info->version, DRV_VERSION);
1161  strcpy(info->bus_info, pci_name(np->pdev));
1162 }
1163 
1164 static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1165 {
1166  struct netdev_private *np = netdev_priv(dev);
1167  if (np->phy_media) {
1168  /* fiber device */
1169  cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1170  cmd->advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
1171  cmd->port = PORT_FIBRE;
1172  cmd->transceiver = XCVR_INTERNAL;
1173  } else {
1174  /* copper device */
1175  cmd->supported = SUPPORTED_10baseT_Half |
1176  SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
1177  SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
1178  SUPPORTED_Autoneg | SUPPORTED_MII;
1179  cmd->advertising = ADVERTISED_10baseT_Half |
1180  ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
1181  ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
1182  ADVERTISED_Autoneg | ADVERTISED_MII;
1183  cmd->port = PORT_MII;
1184  cmd->transceiver = XCVR_INTERNAL;
1185  }
1186  if ( np->link_status ) {
1187  ethtool_cmd_speed_set(cmd, np->speed);
1188  cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1189  } else {
1190  ethtool_cmd_speed_set(cmd, -1);
1191  cmd->duplex = -1;
1192  }
1193  if ( np->an_enable)
1194  cmd->autoneg = AUTONEG_ENABLE;
1195  else
1196  cmd->autoneg = AUTONEG_DISABLE;
1197 
1198  cmd->phy_address = np->phy_addr;
1199  return 0;
1200 }
1201 
1202 static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1203 {
1204  struct netdev_private *np = netdev_priv(dev);
1205  netif_carrier_off(dev);
1206  if (cmd->autoneg == AUTONEG_ENABLE) {
1207  if (np->an_enable)
1208  return 0;
1209  else {
1210  np->an_enable = 1;
1211  mii_set_media(dev);
1212  return 0;
1213  }
1214  } else {
1215  np->an_enable = 0;
1216  if (np->speed == 1000) {
1217  ethtool_cmd_speed_set(cmd, SPEED_100);
1218  cmd->duplex = DUPLEX_FULL;
1219  printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
1220  }
1221  switch (ethtool_cmd_speed(cmd)) {
1222  case SPEED_10:
1223  np->speed = 10;
1224  np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1225  break;
1226  case SPEED_100:
1227  np->speed = 100;
1228  np->full_duplex = (cmd->duplex == DUPLEX_FULL);
1229  break;
1230  case SPEED_1000: /* not supported */
1231  default:
1232  return -EINVAL;
1233  }
1234  mii_set_media(dev);
1235  }
1236  return 0;
1237 }
1238 
1239 static u32 rio_get_link(struct net_device *dev)
1240 {
1241  struct netdev_private *np = netdev_priv(dev);
1242  return np->link_status;
1243 }
1244 
1245 static const struct ethtool_ops ethtool_ops = {
1246  .get_drvinfo = rio_get_drvinfo,
1247  .get_settings = rio_get_settings,
1248  .set_settings = rio_set_settings,
1249  .get_link = rio_get_link,
1250 };
1251 
1252 static int
1253 rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1254 {
1255  int phy_addr;
1256  struct netdev_private *np = netdev_priv(dev);
1257  struct mii_ioctl_data *miidata = if_mii(rq);
1258 
1259  phy_addr = np->phy_addr;
1260  switch (cmd) {
1261  case SIOCGMIIPHY:
1262  miidata->phy_id = phy_addr;
1263  break;
1264  case SIOCGMIIREG:
1265  miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
1266  break;
1267  case SIOCSMIIREG:
1268  if (!capable(CAP_NET_ADMIN))
1269  return -EPERM;
1270  mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
1271  break;
1272  default:
1273  return -EOPNOTSUPP;
1274  }
1275  return 0;
1276 }
1277 
1278 #define EEP_READ 0x0200
1279 #define EEP_BUSY 0x8000
1280 /* Read the EEPROM word */
1281 /* We use I/O instruction to read/write eeprom to avoid fail on some machines */
1282 static int read_eeprom(struct netdev_private *np, int eep_addr)
1283 {
1284  void __iomem *ioaddr = np->eeprom_addr;
1285  int i = 1000;
1286 
1287  dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
1288  while (i-- > 0) {
1289  if (!(dr16(EepromCtrl) & EEP_BUSY))
1290  return dr16(EepromData);
1291  }
1292  return 0;
1293 }
1294 
1295 enum phy_ctrl_bits {
1296  MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
1297  MII_DUPLEX = 0x08,
1298 };
1299 
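/* Bit-banged MII management interface: the PhyCtrl register drives the
   MDC/MDIO pins directly, so mii_read()/mii_write() below clock the
   management frame (preamble, ST, OP, PHY address, register, turnaround,
   16 data bits) out one bit at a time. */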
1300 #define mii_delay() dr8(PhyCtrl)
1301 static void
1302 mii_sendbit (struct net_device *dev, u32 data)
1303 {
1304  struct netdev_private *np = netdev_priv(dev);
1305  void __iomem *ioaddr = np->ioaddr;
1306 
1307  data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
1308  dw8(PhyCtrl, data);
1309  mii_delay ();
1310  dw8(PhyCtrl, data | MII_CLK);
1311  mii_delay ();
1312 }
1313 
1314 static int
1315 mii_getbit (struct net_device *dev)
1316 {
1317  struct netdev_private *np = netdev_priv(dev);
1318  void __iomem *ioaddr = np->ioaddr;
1319  u8 data;
1320 
1321  data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
1322  dw8(PhyCtrl, data);
1323  mii_delay ();
1324  dw8(PhyCtrl, data | MII_CLK);
1325  mii_delay ();
1326  return (dr8(PhyCtrl) >> 1) & 1;
1327 }
1328 
1329 static void
1330 mii_send_bits (struct net_device *dev, u32 data, int len)
1331 {
1332  int i;
1333 
1334  for (i = len - 1; i >= 0; i--) {
1335  mii_sendbit (dev, data & (1 << i));
1336  }
1337 }
1338 
1339 static int
1340 mii_read (struct net_device *dev, int phy_addr, int reg_num)
1341 {
1342  u32 cmd;
1343  int i;
1344  u32 retval = 0;
1345 
1346  /* Preamble */
1347  mii_send_bits (dev, 0xffffffff, 32);
1348  /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1349  /* ST,OP = 0110'b for read operation */
1350  cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
1351  mii_send_bits (dev, cmd, 14);
1352  /* Turnaround */
1353  if (mii_getbit (dev))
1354  goto err_out;
1355  /* Read data */
1356  for (i = 0; i < 16; i++) {
1357  retval |= mii_getbit (dev);
1358  retval <<= 1;
1359  }
1360  /* End cycle */
1361  mii_getbit (dev);
1362  return (retval >> 1) & 0xffff;
1363 
1364  err_out:
1365  return 0;
1366 }
1367 static int
1368 mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
1369 {
1370  u32 cmd;
1371 
1372  /* Preamble */
1373  mii_send_bits (dev, 0xffffffff, 32);
1374  /* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1375  /* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1376  cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
1377  mii_send_bits (dev, cmd, 32);
1378  /* End cycle */
1379  mii_getbit (dev);
1380  return 0;
1381 }
1382 static int
1383 mii_wait_link (struct net_device *dev, int wait)
1384 {
1385  __u16 bmsr;
1386  int phy_addr;
1387  struct netdev_private *np;
1388 
1389  np = netdev_priv(dev);
1390  phy_addr = np->phy_addr;
1391 
1392  do {
1393  bmsr = mii_read (dev, phy_addr, MII_BMSR);
1394  if (bmsr & BMSR_LSTATUS)
1395  return 0;
1396  mdelay (1);
1397  } while (--wait > 0);
1398  return -1;
1399 }
1400 static int
1401 mii_get_media (struct net_device *dev)
1402 {
1403  __u16 negotiate;
1404  __u16 bmsr;
1405  __u16 mscr;
1406  __u16 mssr;
1407  int phy_addr;
1408  struct netdev_private *np;
1409 
1410  np = netdev_priv(dev);
1411  phy_addr = np->phy_addr;
1412 
1413  bmsr = mii_read (dev, phy_addr, MII_BMSR);
1414  if (np->an_enable) {
1415  if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1416  /* Auto-Negotiation not completed */
1417  return -1;
1418  }
1419  negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
1420  mii_read (dev, phy_addr, MII_LPA);
1421  mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1422  mssr = mii_read (dev, phy_addr, MII_STAT1000);
1423  if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
1424  np->speed = 1000;
1425  np->full_duplex = 1;
1426  printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1427  } else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
1428  np->speed = 1000;
1429  np->full_duplex = 0;
1430  printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
1431  } else if (negotiate & ADVERTISE_100FULL) {
1432  np->speed = 100;
1433  np->full_duplex = 1;
1434  printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
1435  } else if (negotiate & ADVERTISE_100HALF) {
1436  np->speed = 100;
1437  np->full_duplex = 0;
1438  printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
1439  } else if (negotiate & ADVERTISE_10FULL) {
1440  np->speed = 10;
1441  np->full_duplex = 1;
1442  printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
1443  } else if (negotiate & ADVERTISE_10HALF) {
1444  np->speed = 10;
1445  np->full_duplex = 0;
1446  printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
1447  }
1448  if (negotiate & ADVERTISE_PAUSE_CAP) {
1449  np->tx_flow &= 1;
1450  np->rx_flow &= 1;
1451  } else if (negotiate & ADVERTISE_PAUSE_ASYM) {
1452  np->tx_flow = 0;
1453  np->rx_flow &= 1;
1454  }
1455  /* else tx_flow, rx_flow = user select */
1456  } else {
1457  __u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
1458  switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
1459  case BMCR_SPEED1000:
1460  printk (KERN_INFO "Operating at 1000 Mbps, ");
1461  break;
1462  case BMCR_SPEED100:
1463  printk (KERN_INFO "Operating at 100 Mbps, ");
1464  break;
1465  case 0:
1466  printk (KERN_INFO "Operating at 10 Mbps, ");
1467  }
1468  if (bmcr & BMCR_FULLDPLX) {
1469  printk (KERN_CONT "Full duplex\n");
1470  } else {
1471  printk (KERN_CONT "Half duplex\n");
1472  }
1473  }
1474  if (np->tx_flow)
1475  printk(KERN_INFO "Enable Tx Flow Control\n");
1476  else
1477  printk(KERN_INFO "Disable Tx Flow Control\n");
1478  if (np->rx_flow)
1479  printk(KERN_INFO "Enable Rx Flow Control\n");
1480  else
1481  printk(KERN_INFO "Disable Rx Flow Control\n");
1482 
1483  return 0;
1484 }
1485 
1486 static int
1487 mii_set_media (struct net_device *dev)
1488 {
1489  __u16 pscr;
1490  __u16 bmcr;
1491  __u16 bmsr;
1492  __u16 anar;
1493  int phy_addr;
1494  struct netdev_private *np;
1495  np = netdev_priv(dev);
1496  phy_addr = np->phy_addr;
1497 
1498  /* Does user set speed? */
1499  if (np->an_enable) {
1500  /* Advertise capabilities */
1501  bmsr = mii_read (dev, phy_addr, MII_BMSR);
1502  anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
1503  ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
1504  ADVERTISE_100HALF | ADVERTISE_10HALF |
1505  ADVERTISE_100BASE4);
1506  if (bmsr & BMSR_100FULL)
1507  anar |= ADVERTISE_100FULL;
1508  if (bmsr & BMSR_100HALF)
1509  anar |= ADVERTISE_100HALF;
1510  if (bmsr & BMSR_100BASE4)
1511  anar |= ADVERTISE_100BASE4;
1512  if (bmsr & BMSR_10FULL)
1513  anar |= ADVERTISE_10FULL;
1514  if (bmsr & BMSR_10HALF)
1515  anar |= ADVERTISE_10HALF;
1516  anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1517  mii_write (dev, phy_addr, MII_ADVERTISE, anar);
1518 
1519  /* Enable Auto crossover */
1520  pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1521  pscr |= 3 << 5; /* 11'b */
1522  mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1523 
1524  /* Soft reset PHY */
1525  mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1526  bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1527  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1528  mdelay(1);
1529  } else {
1530  /* Force speed setting */
1531  /* 1) Disable Auto crossover */
1532  pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
1533  pscr &= ~(3 << 5);
1534  mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
1535 
1536  /* 2) PHY Reset */
1537  bmcr = mii_read (dev, phy_addr, MII_BMCR);
1538  bmcr |= BMCR_RESET;
1539  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1540 
1541  /* 3) Power Down */
1542  bmcr = 0x1940; /* must be 0x1940 */
1543  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1544  mdelay (100); /* wait a certain time */
1545 
1546  /* 4) Advertise nothing */
1547  mii_write (dev, phy_addr, MII_ADVERTISE, 0);
1548 
1549  /* 5) Set media and Power Up */
1550  bmcr = BMCR_PDOWN;
1551  if (np->speed == 100) {
1552  bmcr |= BMCR_SPEED100;
1553  printk (KERN_INFO "Manual 100 Mbps, ");
1554  } else if (np->speed == 10) {
1555  printk (KERN_INFO "Manual 10 Mbps, ");
1556  }
1557  if (np->full_duplex) {
1558  bmcr |= BMCR_FULLDPLX;
1559  printk (KERN_CONT "Full duplex\n");
1560  } else {
1561  printk (KERN_CONT "Half duplex\n");
1562  }
1563 #if 0
1564  /* Set 1000BaseT Master/Slave setting */
1565  mscr = mii_read (dev, phy_addr, MII_CTRL1000);
1566  mscr |= MII_MSCR_CFG_ENABLE;
1567  mscr &= ~MII_MSCR_CFG_VALUE = 0;
1568 #endif
1569  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1570  mdelay(10);
1571  }
1572  return 0;
1573 }
1574 
1575 static int
1576 mii_get_media_pcs (struct net_device *dev)
1577 {
1578  __u16 negotiate;
1579  __u16 bmsr;
1580  int phy_addr;
1581  struct netdev_private *np;
1582 
1583  np = netdev_priv(dev);
1584  phy_addr = np->phy_addr;
1585 
1586  bmsr = mii_read (dev, phy_addr, PCS_BMSR);
1587  if (np->an_enable) {
1588  if (!(bmsr & BMSR_ANEGCOMPLETE)) {
1589  /* Auto-Negotiation not completed */
1590  return -1;
1591  }
1592  negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
1593  mii_read (dev, phy_addr, PCS_ANLPAR);
1594  np->speed = 1000;
1595  if (negotiate & PCS_ANAR_FULL_DUPLEX) {
1596  printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
1597  np->full_duplex = 1;
1598  } else {
1599  printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
1600  np->full_duplex = 0;
1601  }
1602  if (negotiate & PCS_ANAR_PAUSE) {
1603  np->tx_flow &= 1;
1604  np->rx_flow &= 1;
1605  } else if (negotiate & PCS_ANAR_ASYMMETRIC) {
1606  np->tx_flow = 0;
1607  np->rx_flow &= 1;
1608  }
1609  /* else tx_flow, rx_flow = user select */
1610  } else {
1611  __u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
1612  printk (KERN_INFO "Operating at 1000 Mbps, ");
1613  if (bmcr & BMCR_FULLDPLX) {
1614  printk (KERN_CONT "Full duplex\n");
1615  } else {
1616  printk (KERN_CONT "Half duplex\n");
1617  }
1618  }
1619  if (np->tx_flow)
1620  printk(KERN_INFO "Enable Tx Flow Control\n");
1621  else
1622  printk(KERN_INFO "Disable Tx Flow Control\n");
1623  if (np->rx_flow)
1624  printk(KERN_INFO "Enable Rx Flow Control\n");
1625  else
1626  printk(KERN_INFO "Disable Rx Flow Control\n");
1627 
1628  return 0;
1629 }
1630 
1631 static int
1632 mii_set_media_pcs (struct net_device *dev)
1633 {
1634  __u16 bmcr;
1635  __u16 esr;
1636  __u16 anar;
1637  int phy_addr;
1638  struct netdev_private *np;
1639  np = netdev_priv(dev);
1640  phy_addr = np->phy_addr;
1641 
1642  /* Auto-Negotiation? */
1643  if (np->an_enable) {
1644  /* Advertise capabilities */
1645  esr = mii_read (dev, phy_addr, PCS_ESR);
1646  anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
1647  ~PCS_ANAR_HALF_DUPLEX &
1648  ~PCS_ANAR_FULL_DUPLEX;
1649  if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
1650  anar |= PCS_ANAR_HALF_DUPLEX;
1651  if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
1652  anar |= PCS_ANAR_FULL_DUPLEX;
1653  anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
1654  mii_write (dev, phy_addr, MII_ADVERTISE, anar);
1655 
1656  /* Soft reset PHY */
1657  mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
1658  bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
1659  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1660  mdelay(1);
1661  } else {
1662  /* Force speed setting */
1663  /* PHY Reset */
1664  bmcr = BMCR_RESET;
1665  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1666  mdelay(10);
1667  if (np->full_duplex) {
1668  bmcr = BMCR_FULLDPLX;
1669  printk (KERN_INFO "Manual full duplex\n");
1670  } else {
1671  bmcr = 0;
1672  printk (KERN_INFO "Manual half duplex\n");
1673  }
1674  mii_write (dev, phy_addr, MII_BMCR, bmcr);
1675  mdelay(10);
1676 
1677  /* Advertise nothing */
1678  mii_write (dev, phy_addr, MII_ADVERTISE, 0);
1679  }
1680  return 0;
1681 }
1682 
1683 
1684 static int
1685 rio_close (struct net_device *dev)
1686 {
1687  struct netdev_private *np = netdev_priv(dev);
1688  void __iomem *ioaddr = np->ioaddr;
1689 
1690  struct pci_dev *pdev = np->pdev;
1691  struct sk_buff *skb;
1692  int i;
1693 
1694  netif_stop_queue (dev);
1695 
1696  /* Disable interrupts */
1697  dw16(IntEnable, 0);
1698 
1699  /* Stop Tx and Rx logics */
1700  dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
1701 
1702  free_irq(pdev->irq, dev);
1703  del_timer_sync (&np->timer);
1704 
1705  /* Free all the skbuffs in the queue. */
1706  for (i = 0; i < RX_RING_SIZE; i++) {
1707  skb = np->rx_skbuff[i];
1708  if (skb) {
1709  pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1710  skb->len, PCI_DMA_FROMDEVICE);
1711  dev_kfree_skb (skb);
1712  np->rx_skbuff[i] = NULL;
1713  }
1714  np->rx_ring[i].status = 0;
1715  np->rx_ring[i].fraginfo = 0;
1716  }
1717  for (i = 0; i < TX_RING_SIZE; i++) {
1718  skb = np->tx_skbuff[i];
1719  if (skb) {
1720  pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1721  skb->len, PCI_DMA_TODEVICE);
1722  dev_kfree_skb (skb);
1723  np->tx_skbuff[i] = NULL;
1724  }
1725  }
1726 
1727  return 0;
1728 }
1729 
1730 static void __devexit
1731 rio_remove1 (struct pci_dev *pdev)
1732 {
1733  struct net_device *dev = pci_get_drvdata (pdev);
1734 
1735  if (dev) {
1736  struct netdev_private *np = netdev_priv(dev);
1737 
1738  unregister_netdev (dev);
1739  pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
1740  np->rx_ring_dma);
1741  pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1742  np->tx_ring_dma);
1743 #ifdef MEM_MAPPING
1744  pci_iounmap(pdev, np->ioaddr);
1745 #endif
1746  pci_iounmap(pdev, np->eeprom_addr);
1747  free_netdev (dev);
1748  pci_release_regions (pdev);
1749  pci_disable_device (pdev);
1750  }
1751  pci_set_drvdata (pdev, NULL);
1752 }
1753 
1754 static struct pci_driver rio_driver = {
1755  .name = "dl2k",
1756  .id_table = rio_pci_tbl,
1757  .probe = rio_probe1,
1758  .remove = __devexit_p(rio_remove1),
1759 };
1760 
1761 static int __init
1762 rio_init (void)
1763 {
1764  return pci_register_driver(&rio_driver);
1765 }
1766 
1767 static void __exit
1768 rio_exit (void)
1769 {
1770  pci_unregister_driver (&rio_driver);
1771 }
1772 
1773 module_init (rio_init);
1774 module_exit (rio_exit);
1775 
1776 /*
1777 
1778 Compile command:
1779 
1780 gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
1781 
1782 Read Documentation/networking/dl2k.txt for details.
1783 
1784 */
1785