starfire.c
1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
2 /*
3  Written 1998-2000 by Donald Becker.
4 
5  Current maintainer is Ion Badulescu <ionut ta badula tod org>. Please
6  send all bug reports to me, and not to Donald Becker, as this code
7  has been heavily modified from Donald's original version.
8 
9  This software may be used and distributed according to the terms of
10  the GNU General Public License (GPL), incorporated herein by reference.
11  Drivers based on or derived from this code fall under the GPL and must
12  retain the authorship, copyright and license notice. This file is not
13  a complete program and may only be used when the entire operating
14  system is licensed under the GPL.
15 
16  The information below comes from Donald Becker's original driver:
17 
18  The author may be reached as [email protected], or C/O
19  Scyld Computing Corporation
20  410 Severn Ave., Suite 210
21  Annapolis MD 21403
22 
23  Support and updates available at
24  http://www.scyld.com/network/starfire.html
25  [link no longer provides useful info -jgarzik]
26 
27 */
28 
29 #define DRV_NAME "starfire"
30 #define DRV_VERSION "2.1"
31 #define DRV_RELDATE "July 6, 2008"
32 
33 #include <linux/interrupt.h>
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/crc32.h>
42 #include <linux/ethtool.h>
43 #include <linux/mii.h>
44 #include <linux/if_vlan.h>
45 #include <linux/mm.h>
46 #include <linux/firmware.h>
47 #include <asm/processor.h> /* Processor type for cache alignment. */
48 #include <asm/uaccess.h>
49 #include <asm/io.h>
50 
51 /*
52  * The current frame processor firmware fails to checksum a fragment
53  * of length 1. If and when this is fixed, the #define below can be removed.
54  */
55 #define HAS_BROKEN_FIRMWARE
56 
57 /*
58  * If using the broken firmware, data must be padded to the next 32-bit boundary.
59  */
60 #ifdef HAS_BROKEN_FIRMWARE
61 #define PADDING_MASK 3
62 #endif
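/*
 * Illustration only, not driver code: start_tx() later pads short
 * checksummed frames with (skb->len + PADDING_MASK) & ~PADDING_MASK,
 * i.e. it rounds the length up to the next 32-bit boundary. A minimal
 * stand-alone sketch of that arithmetic (pad_to_dword() is a
 * hypothetical name), kept out of any build by the #if 0 guard:
 */
#if 0
#include <stdio.h>

static unsigned int pad_to_dword(unsigned int len)
{
	return (len + 3) & ~3;	/* same as (len + PADDING_MASK) & ~PADDING_MASK */
}

int main(void)
{
	/* 60 is already aligned; 61, 62 and 63 all round up to 64 */
	printf("%u %u %u %u\n", pad_to_dword(60), pad_to_dword(61),
	       pad_to_dword(62), pad_to_dword(63));
	return 0;
}
#endif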
63 
64 /*
65  * Define this if using the driver with the zero-copy patch
66  */
67 #define ZEROCOPY
68 
69 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
70 #define VLAN_SUPPORT
71 #endif
72 
73 /* The user-configurable values.
74  These may be modified when a driver module is loaded.*/
75 
76 /* Used for tuning interrupt latency vs. overhead. */
77 static int intr_latency;
78 static int small_frames;
79 
80 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
81 static int max_interrupt_work = 20;
82 static int mtu;
83 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
84  The Starfire has a 512 element hash table based on the Ethernet CRC. */
85 static const int multicast_filter_limit = 512;
86 /* Whether to do TCP/UDP checksums in hardware */
87 static int enable_hw_cksum = 1;
88 
89 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
90 /*
91  * Set the copy breakpoint for the copy-only-tiny-frames scheme.
92  * Setting to > 1518 effectively disables this feature.
93  *
94  * NOTE:
95  * The ia64 doesn't allow unaligned loads, even of integers misaligned
96  * on a 2-byte boundary. Thus we always force copying of packets, as
97  * the Starfire doesn't allow misaligned DMAs ;-(
98  * 23/10/2000 - Jes
99  *
100  * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
101  * at least, having unaligned frames leads to a rather serious performance
102  * penalty. -Ion
103  */
104 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
105 static int rx_copybreak = PKT_BUF_SZ;
106 #else
107 static int rx_copybreak /* = 0 */;
108 #endif
109 
110 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
111 #ifdef __sparc__
112 #define DMA_BURST_SIZE 64
113 #else
114 #define DMA_BURST_SIZE 128
115 #endif
116 
117 /* Operational parameters that are set at compile time. */
118 
119 /* The "native" ring sizes are either 256 or 2048.
120  However in some modes a descriptor may be marked to wrap the ring earlier.
121 */
122 #define RX_RING_SIZE 256
123 #define TX_RING_SIZE 32
124 /* The completion queues are fixed at 1024 entries, i.e. 4KB or 8KB. */
125 #define DONE_Q_SIZE 1024
126 /* All queues must be aligned on a 256-byte boundary */
127 #define QUEUE_ALIGN 256
128 
129 #if RX_RING_SIZE > 256
130 #define RX_Q_ENTRIES Rx2048QEntries
131 #else
132 #define RX_Q_ENTRIES Rx256QEntries
133 #endif
134 
135 /* Operational parameters that usually are not changed. */
136 /* Time in jiffies before concluding the transmitter is hung. */
137 #define TX_TIMEOUT (2 * HZ)
138 
139 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
140 /* 64-bit dma_addr_t */
141 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
142 #define netdrv_addr_t __le64
143 #define cpu_to_dma(x) cpu_to_le64(x)
144 #define dma_to_cpu(x) le64_to_cpu(x)
145 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
146 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
147 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
148 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
149 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
150 #else /* 32-bit dma_addr_t */
151 #define netdrv_addr_t __le32
152 #define cpu_to_dma(x) cpu_to_le32(x)
153 #define dma_to_cpu(x) le32_to_cpu(x)
154 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
155 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
156 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
157 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
158 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
159 #endif
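/*
 * Illustration only, not driver code: with ADDR_64BITS, netdev_open()
 * later writes the upper 32 bits of queue_mem_dma to RxDescQHiAddr,
 * TxRingHiAddr and CompletionHiAddr as (dma >> 16) >> 16 -- presumably
 * split into two shifts so the expression remains valid when dma_addr_t
 * is only 32 bits wide. A stand-alone sketch of that high/low split
 * (upper_32()/lower_32() are hypothetical names), kept out of any build
 * by the #if 0 guard:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t upper_32(uint64_t dma) { return (dma >> 16) >> 16; }
static uint32_t lower_32(uint64_t dma) { return (uint32_t)dma; }

int main(void)
{
	uint64_t dma = 0x00000001fee00000ULL;	/* example bus address */

	/* prints hi=0x1 lo=0xfee00000 */
	printf("hi=%#x lo=%#x\n", upper_32(dma), lower_32(dma));
	return 0;
}
#endif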
160 
161 #define skb_first_frag_len(skb) skb_headlen(skb)
162 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
163 
164 /* Firmware names */
165 #define FIRMWARE_RX "adaptec/starfire_rx.bin"
166 #define FIRMWARE_TX "adaptec/starfire_tx.bin"
167 
168 /* These identify the driver base version and may not be removed. */
169 static const char version[] __devinitconst =
170 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <[email protected]>\n"
171 " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
172 
173 MODULE_AUTHOR("Donald Becker <[email protected]>");
174 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
175 MODULE_LICENSE("GPL");
179 
180 module_param(max_interrupt_work, int, 0);
181 module_param(mtu, int, 0);
182 module_param(debug, int, 0);
183 module_param(rx_copybreak, int, 0);
184 module_param(intr_latency, int, 0);
185 module_param(small_frames, int, 0);
186 module_param(enable_hw_cksum, int, 0);
187 MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
188 MODULE_PARM_DESC(mtu, "MTU (all boards)");
189 MODULE_PARM_DESC(debug, "Debug level (0-6)");
190 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
191 MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
192 MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
193 MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
194 
195 /*
196  Theory of Operation
197 
198 I. Board Compatibility
199 
200 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
201 
202 II. Board-specific settings
203 
204 III. Driver operation
205 
206 IIIa. Ring buffers
207 
208 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
209 ring sizes are set fixed by the hardware, but may optionally be wrapped
210 earlier by the END bit in the descriptor.
211 This driver uses that hardware queue size for the Rx ring, where a large
212 number of entries has no ill effect beyond increasing the potential backlog.
213 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
214 disables the queue layer priority ordering and we have no mechanism to
215 utilize the hardware two-level priority queue. When modifying the
216 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
217 levels.
218 
219 IIIb/c. Transmit/Receive Structure
220 
221 See the Adaptec manual for the many possible structures, and options for
222 each structure. There are far too many to document all of them here.
223 
224 For transmit this driver uses type 0/1 transmit descriptors (depending
225 on the 32/64 bitness of the architecture), and relies on automatic
226 minimum-length padding. It does not use the completion queue
227 consumer index, but instead checks for non-zero status entries.
228 
229 For receive this driver uses type 2/3 receive descriptors. The driver
230 allocates full frame size skbuffs for the Rx ring buffers, so all frames
231 should fit in a single descriptor. The driver does not use the completion
232 queue consumer index, but instead checks for non-zero status entries.
233 
234 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
235 is allocated and the frame is copied to the new skbuff. When the incoming
236 frame is larger, the skbuff is passed directly up the protocol stack.
237 Buffers consumed this way are replaced by newly allocated skbuffs in a later
238 phase of receive.
239 
240 A notable aspect of operation is that unaligned buffers are not permitted by
241 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
242 isn't longword aligned, which may cause problems on some machines,
243 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
244 the frame into a new skbuff unconditionally. Copied frames are put into the
245 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
246 
247 IIId. Synchronization
248 
249 The driver runs as two independent, single-threaded flows of control. One
250 is the send-packet routine, which enforces single-threaded use by the
251 dev->tbusy flag. The other thread is the interrupt handler, which is single
252 threaded by the hardware and interrupt handling software.
253 
254 The send packet thread has partial control over the Tx ring and the netif_queue
255 status. If the number of free Tx slots in the ring falls below a certain number
256 (currently hardcoded to 4), it signals the upper layer to stop the queue.
257 
258 The interrupt handler has exclusive control over the Rx ring and records stats
259 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
260 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
261 number of free Tx slots is above the threshold, it signals the upper layer to
262 restart the queue.
263 
264 IV. Notes
265 
266 IVb. References
267 
268 The Adaptec Starfire manuals, available only from Adaptec.
269 http://www.scyld.com/expert/100mbps.html
270 http://www.scyld.com/expert/NWay.html
271 
272 IVc. Errata
273 
274 - StopOnPerr is broken, don't enable
275 - Hardware ethernet padding exposes random data, perform software padding
276  instead (unverified -- works correctly for all the hardware I have)
277 
278 */
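/*
 * Illustration only, not driver code: section IIIb/c above copies small
 * frames into a fresh skbuff at an offset of "+2". The point of the
 * two-byte reserve is simply that 2 plus the 14-byte Ethernet header
 * places the IP header at offset 16, i.e. back on a 16-byte boundary.
 * A stand-alone sketch of that arithmetic, kept out of any build by
 * the #if 0 guard:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int reserve = 2;	/* skb_reserve(skb, 2) */
	unsigned int eth_hlen = 14;	/* Ethernet header length */
	unsigned int ip_off = reserve + eth_hlen;

	printf("IP header offset %u, 16-byte aligned: %s\n",
	       ip_off, ip_off % 16 == 0 ? "yes" : "no");
	return 0;
}
#endif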
279 
280 
281 
283 
284 enum chipset {
285  CH_6915 = 0,
286 };
287 
288 static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
289  { PCI_VDEVICE(ADAPTEC, 0x6915), CH_6915 },
290  { 0, }
291 };
292 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
293 
294 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
295 static const struct chip_info {
296  const char *name;
297  int drv_flags;
298 } netdrv_tbl[] __devinitconst = {
299  { "Adaptec Starfire 6915", CanHaveMII },
300 };
301 
302 
303 /* Offsets to the device registers.
304  Unlike software-only systems, device drivers interact with complex hardware.
305  It's not useful to define symbolic names for every register bit in the
306  device. The name can only partially document the semantics and make
307  the driver longer and more difficult to read.
308  In general, only the important configuration values or bits changed
309  multiple times should be defined symbolically.
310 */
312  PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
313  IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
314  MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
315  GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
316  TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
317  TxRingHiAddr=0x5009C, /* 64 bit address extension. */
318  TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
319  TxThreshold=0x500B0,
323  RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
324  RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
325  TxMode=0x55000, VlanType=0x55064,
326  PerfFilterTable=0x56000, HashTable=0x56100,
327  TxGfpMem=0x58000, RxGfpMem=0x5a000,
328 };
329 
330 /*
331  * Bits in the interrupt status/mask registers.
332  * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
333  * enables all the interrupt sources that are or'ed into those status bits.
334  */
336  IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
337  IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
338  IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
339  IntrTxComplQLow=0x200000, IntrPCI=0x100000,
340  IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
341  IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
343  IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
344  IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
345  IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
349  IntrTxGfp=0x02, IntrPCIPad=0x01,
350  /* not quite bits */
353  IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
354 };
355 
356 /* Bits in the RxFilterMode register. */
360  PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
361  WakeupOnGFP=0x0800,
362 };
363 
364 /* Bits in the TxMode register */
366  MiiSoftReset=0x8000, MIILoopback=0x4000,
367  TxFlowEnable=0x0800, RxFlowEnable=0x0400,
368  PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
369 };
370 
371 /* Bits in the TxDescCtrl register. */
381 };
382 
383 /* Bits in the RxDescQCtrl register. */
386  RxPrefetchMode=0x8000, RxVariableQ=0x2000,
394 };
395 
396 /* Bits in the RxDMACtrl register. */
398  RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
399  RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
400  RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
405  RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
406  RxDMAQ2NonIP=0x400000,
407  RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
410 };
411 
412 /* Bits in the RxCompletionAddr register */
419 };
420 
421 /* Bits in the TxCompletionAddr register */
428 };
429 
430 /* Bits in the GenCtrl register */
432  RxEnable=0x05, TxEnable=0x0a,
434 };
435 
436 /* Bits in the IntrTimerCtrl register */
441 };
442 
443 /* The Rx and Tx buffer descriptors. */
446 };
449 };
450 
451 /* Completion queue entry. */
453  __le32 status; /* Low 16 bits is length. */
454 };
456  __le32 status; /* Low 16 bits is length. */
459 };
461  __le32 status; /* Low 16 bits is length. */
462  __le16 csum; /* Partial checksum */
464 };
466  __le32 status; /* Low 16 bits is length. */
470  __le16 csum; /* partial checksum */
472 };
473 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
474 #ifdef VLAN_SUPPORT
475 typedef struct full_rx_done_desc rx_done_desc;
476 #define RxComplType RxComplType3
477 #else /* not VLAN_SUPPORT */
479 #define RxComplType RxComplType2
480 #endif /* not VLAN_SUPPORT */
481 
483  RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
484 };
485 
486 /* Type 1 Tx descriptor. */
488  __le32 status; /* Upper bits are status, lower 16 length. */
490 };
491 
492 /* Type 2 Tx descriptor. */
494  __le32 status; /* Upper bits are status, lower 16 length. */
497 };
498 
499 #ifdef ADDR_64BITS
500 typedef struct starfire_tx_desc_2 starfire_tx_desc;
501 #define TX_DESC_TYPE TxDescType2
502 #else /* not ADDR_64BITS */
504 #define TX_DESC_TYPE TxDescType1
505 #endif /* not ADDR_64BITS */
506 #define TX_DESC_SPACING TxDescSpaceUnlim
507 
509  TxDescID=0xB0000000,
510  TxCRCEn=0x01000000, TxDescIntr=0x08000000,
511  TxRingWrap=0x04000000, TxCalTCP=0x02000000,
512 };
513 struct tx_done_desc {
514  __le32 status; /* timestamp, index. */
515 #if 0
516  __le32 intrstatus; /* interrupt status */
517 #endif
518 };
519 
520 struct rx_ring_info {
521  struct sk_buff *skb;
523 };
524 struct tx_ring_info {
525  struct sk_buff *skb;
527  unsigned int used_slots;
528 };
529 
530 #define PHY_CNT 2
532  /* Descriptor rings first for alignment. */
537  /* The addresses of rx/tx-in-place skbuffs. */
540  /* Pointers to completion queues (full pages). */
543  unsigned int rx_done;
546  unsigned int tx_done;
548  struct net_device *dev;
549  struct pci_dev *pci_dev;
550 #ifdef VLAN_SUPPORT
551  unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
552 #endif
553  void *queue_mem;
556 
557  /* Frequently used values: keep some adjacent for cache effect. */
559  unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
560  unsigned int cur_tx, dirty_tx, reap_tx;
561  unsigned int rx_buf_sz; /* Based on MTU+slack. */
562  /* These values keep track of the transceiver/media in use. */
563  int speed100; /* Set if speed == 100MBit. */
567  /* MII transceiver section. */
568  struct mii_if_info mii_if; /* MII lib hooks/info */
569  int phy_cnt; /* MII device addresses. */
570  unsigned char phys[PHY_CNT]; /* MII device addresses. */
571  void __iomem *base;
572 };
573 
574 
575 static int mdio_read(struct net_device *dev, int phy_id, int location);
576 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
577 static int netdev_open(struct net_device *dev);
578 static void check_duplex(struct net_device *dev);
579 static void tx_timeout(struct net_device *dev);
580 static void init_ring(struct net_device *dev);
581 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
582 static irqreturn_t intr_handler(int irq, void *dev_instance);
583 static void netdev_error(struct net_device *dev, int intr_status);
584 static int __netdev_rx(struct net_device *dev, int *quota);
585 static int netdev_poll(struct napi_struct *napi, int budget);
586 static void refill_rx_ring(struct net_device *dev);
587 static void netdev_error(struct net_device *dev, int intr_status);
588 static void set_rx_mode(struct net_device *dev);
589 static struct net_device_stats *get_stats(struct net_device *dev);
590 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
591 static int netdev_close(struct net_device *dev);
592 static void netdev_media_change(struct net_device *dev);
593 static const struct ethtool_ops ethtool_ops;
594 
595 
596 #ifdef VLAN_SUPPORT
597 static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
598 {
599  struct netdev_private *np = netdev_priv(dev);
600 
601  spin_lock(&np->lock);
602  if (debug > 1)
603  printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
604  set_bit(vid, np->active_vlans);
605  set_rx_mode(dev);
606  spin_unlock(&np->lock);
607 
608  return 0;
609 }
610 
611 static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
612 {
613  struct netdev_private *np = netdev_priv(dev);
614 
615  spin_lock(&np->lock);
616  if (debug > 1)
617  printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
618  clear_bit(vid, np->active_vlans);
619  set_rx_mode(dev);
620  spin_unlock(&np->lock);
621 
622  return 0;
623 }
624 #endif /* VLAN_SUPPORT */
625 
626 
627 static const struct net_device_ops netdev_ops = {
628  .ndo_open = netdev_open,
629  .ndo_stop = netdev_close,
630  .ndo_start_xmit = start_tx,
631  .ndo_tx_timeout = tx_timeout,
632  .ndo_get_stats = get_stats,
633  .ndo_set_rx_mode = set_rx_mode,
634  .ndo_do_ioctl = netdev_ioctl,
635  .ndo_change_mtu = eth_change_mtu,
636  .ndo_set_mac_address = eth_mac_addr,
637  .ndo_validate_addr = eth_validate_addr,
638 #ifdef VLAN_SUPPORT
639  .ndo_vlan_rx_add_vid = netdev_vlan_rx_add_vid,
640  .ndo_vlan_rx_kill_vid = netdev_vlan_rx_kill_vid,
641 #endif
642 };
643 
644 static int __devinit starfire_init_one(struct pci_dev *pdev,
645  const struct pci_device_id *ent)
646 {
647  struct device *d = &pdev->dev;
648  struct netdev_private *np;
649  int i, irq, chip_idx = ent->driver_data;
650  struct net_device *dev;
651  long ioaddr;
652  void __iomem *base;
653  int drv_flags, io_size;
654  int boguscnt;
655 
656 /* when built into the kernel, we only print version if device is found */
657 #ifndef MODULE
658  static int printed_version;
659  if (!printed_version++)
660  printk(version);
661 #endif
662 
663  if (pci_enable_device (pdev))
664  return -EIO;
665 
666  ioaddr = pci_resource_start(pdev, 0);
667  io_size = pci_resource_len(pdev, 0);
668  if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
669  dev_err(d, "no PCI MEM resources, aborting\n");
670  return -ENODEV;
671  }
672 
673  dev = alloc_etherdev(sizeof(*np));
674  if (!dev)
675  return -ENOMEM;
676 
677  SET_NETDEV_DEV(dev, &pdev->dev);
678 
679  irq = pdev->irq;
680 
681  if (pci_request_regions (pdev, DRV_NAME)) {
682  dev_err(d, "cannot reserve PCI resources, aborting\n");
683  goto err_out_free_netdev;
684  }
685 
686  base = ioremap(ioaddr, io_size);
687  if (!base) {
688  dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
689  io_size, ioaddr);
690  goto err_out_free_res;
691  }
692 
693  pci_set_master(pdev);
694 
695  /* enable MWI -- it vastly improves Rx performance on sparc64 */
696  pci_try_set_mwi(pdev);
697 
698 #ifdef ZEROCOPY
699  /* Starfire can do TCP/UDP checksumming */
700  if (enable_hw_cksum)
702 #endif /* ZEROCOPY */
703 
704 #ifdef VLAN_SUPPORT
706 #endif /* VLAN_RX_KILL_VID */
707 #ifdef ADDR_64BITS
708  dev->features |= NETIF_F_HIGHDMA;
709 #endif /* ADDR_64BITS */
710 
711  /* Serial EEPROM reads are hidden by the hardware. */
712  for (i = 0; i < 6; i++)
713  dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
714 
715 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
716  if (debug > 4)
717  for (i = 0; i < 0x20; i++)
718  printk("%2.2x%s",
719  (unsigned int)readb(base + EEPROMCtrl + i),
720  i % 16 != 15 ? " " : "\n");
721 #endif
722 
723  /* Issue soft reset */
724  writel(MiiSoftReset, base + TxMode);
725  udelay(1000);
726  writel(0, base + TxMode);
727 
728  /* Reset the chip to erase previous misconfiguration. */
729  writel(1, base + PCIDeviceConfig);
730  boguscnt = 1000;
731  while (--boguscnt > 0) {
732  udelay(10);
733  if ((readl(base + PCIDeviceConfig) & 1) == 0)
734  break;
735  }
736  if (boguscnt == 0)
737  printk("%s: chipset reset never completed!\n", dev->name);
738  /* wait a little longer */
739  udelay(1000);
740 
741  np = netdev_priv(dev);
742  np->dev = dev;
743  np->base = base;
744  spin_lock_init(&np->lock);
745  pci_set_drvdata(pdev, dev);
746 
747  np->pci_dev = pdev;
748 
749  np->mii_if.dev = dev;
750  np->mii_if.mdio_read = mdio_read;
751  np->mii_if.mdio_write = mdio_write;
752  np->mii_if.phy_id_mask = 0x1f;
753  np->mii_if.reg_num_mask = 0x1f;
754 
755  drv_flags = netdrv_tbl[chip_idx].drv_flags;
756 
757  np->speed100 = 1;
758 
759  /* timer resolution is 128 * 0.8us */
760  np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
762 
763  if (small_frames > 0) {
765  switch (small_frames) {
766  case 1 ... 64:
768  break;
769  case 65 ... 128:
771  break;
772  case 129 ... 256:
774  break;
775  default:
777  if (small_frames > 512)
778  printk("Adjusting small_frames down to 512\n");
779  break;
780  }
781  }
782 
783  dev->netdev_ops = &netdev_ops;
784  dev->watchdog_timeo = TX_TIMEOUT;
786 
787  netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
788 
789  if (mtu)
790  dev->mtu = mtu;
791 
792  if (register_netdev(dev))
793  goto err_out_cleardev;
794 
795  printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
796  dev->name, netdrv_tbl[chip_idx].name, base,
797  dev->dev_addr, irq);
798 
799  if (drv_flags & CanHaveMII) {
800  int phy, phy_idx = 0;
801  int mii_status;
802  for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
803  mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
804  mdelay(100);
805  boguscnt = 1000;
806  while (--boguscnt > 0)
807  if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
808  break;
809  if (boguscnt == 0) {
810  printk("%s: PHY#%d reset never completed!\n", dev->name, phy);
811  continue;
812  }
813  mii_status = mdio_read(dev, phy, MII_BMSR);
814  if (mii_status != 0) {
815  np->phys[phy_idx++] = phy;
816  np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
817  printk(KERN_INFO "%s: MII PHY found at address %d, status "
818  "%#4.4x advertising %#4.4x.\n",
819  dev->name, phy, mii_status, np->mii_if.advertising);
820  /* there can be only one PHY on-board */
821  break;
822  }
823  }
824  np->phy_cnt = phy_idx;
825  if (np->phy_cnt > 0)
826  np->mii_if.phy_id = np->phys[0];
827  else
828  memset(&np->mii_if, 0, sizeof(np->mii_if));
829  }
830 
831  printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
832  dev->name, enable_hw_cksum ? "enabled" : "disabled");
833  return 0;
834 
835 err_out_cleardev:
836  pci_set_drvdata(pdev, NULL);
837  iounmap(base);
838 err_out_free_res:
839  pci_release_regions (pdev);
840 err_out_free_netdev:
841  free_netdev(dev);
842  return -ENODEV;
843 }
844 
845 
846 /* Read the MII Management Data I/O (MDIO) interfaces. */
847 static int mdio_read(struct net_device *dev, int phy_id, int location)
848 {
849  struct netdev_private *np = netdev_priv(dev);
850  void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
851  int result, boguscnt=1000;
852  /* ??? Should we add a busy-wait here? */
853  do {
854  result = readl(mdio_addr);
855  } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
856  if (boguscnt == 0)
857  return 0;
858  if ((result & 0xffff) == 0xffff)
859  return 0;
860  return result & 0xffff;
861 }
862 
863 
864 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
865 {
866  struct netdev_private *np = netdev_priv(dev);
867  void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
868  writel(value, mdio_addr);
869  /* The busy-wait will occur before a read. */
870 }
871 
872 
873 static int netdev_open(struct net_device *dev)
874 {
875  const struct firmware *fw_rx, *fw_tx;
876  const __be32 *fw_rx_data, *fw_tx_data;
877  struct netdev_private *np = netdev_priv(dev);
878  void __iomem *ioaddr = np->base;
879  const int irq = np->pci_dev->irq;
880  int i, retval;
881  size_t tx_size, rx_size;
882  size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
883 
884  /* Do we ever need to reset the chip??? */
885 
886  retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
887  if (retval)
888  return retval;
889 
890  /* Disable the Rx and Tx, and reset the chip. */
891  writel(0, ioaddr + GenCtrl);
892  writel(1, ioaddr + PCIDeviceConfig);
893  if (debug > 1)
894  printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
895  dev->name, irq);
896 
897  /* Allocate the various queues. */
898  if (!np->queue_mem) {
899  tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
900  rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
901  tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
902  rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
903  np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
905  if (np->queue_mem == NULL) {
906  free_irq(irq, dev);
907  return -ENOMEM;
908  }
909 
910  np->tx_done_q = np->queue_mem;
911  np->tx_done_q_dma = np->queue_mem_dma;
912  np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
913  np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
914  np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
915  np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
916  np->rx_ring = (void *) np->tx_ring + tx_ring_size;
917  np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
918  }
919 
920  /* Start with no carrier, it gets adjusted later */
921  netif_carrier_off(dev);
922  init_ring(dev);
923  /* Set the size of the Rx buffers. */
925  (0 << RxMinDescrThreshShift) |
927  RX_Q_ENTRIES |
929  RxDescSpace4,
930  ioaddr + RxDescQCtrl);
931 
932  /* Set up the Rx DMA controller. */
934  (0 << RxEarlyIntThreshShift) |
935  (6 << RxHighPrioThreshShift) |
936  ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
937  ioaddr + RxDMACtrl);
938 
939  /* Set Tx descriptor */
941  (0 << TxPadLenShift) |
942  ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
945  ioaddr + TxDescCtrl);
946 
947  writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
948  writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
949  writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
950  writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
951  writel(np->tx_ring_dma, ioaddr + TxRingPtr);
952 
953  writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
954  writel(np->rx_done_q_dma |
955  RxComplType |
956  (0 << RxComplThreshShift),
957  ioaddr + RxCompletionAddr);
958 
959  if (debug > 1)
960  printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
961 
962  /* Fill both the Tx SA register and the Rx perfect filter. */
963  for (i = 0; i < 6; i++)
964  writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
965  /* The first entry is special because it bypasses the VLAN filter.
966  Don't use it. */
967  writew(0, ioaddr + PerfFilterTable);
968  writew(0, ioaddr + PerfFilterTable + 4);
969  writew(0, ioaddr + PerfFilterTable + 8);
970  for (i = 1; i < 16; i++) {
971  __be16 *eaddrs = (__be16 *)dev->dev_addr;
972  void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
973  writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
974  writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
975  writew(be16_to_cpu(eaddrs[0]), setup_frm); setup_frm += 8;
976  }
977 
978  /* Initialize other registers. */
979  /* Configure the PCI bus bursts and FIFO thresholds. */
980  np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
981  writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
982  udelay(1000);
983  writel(np->tx_mode, ioaddr + TxMode);
984  np->tx_threshold = 4;
985  writel(np->tx_threshold, ioaddr + TxThreshold);
986 
987  writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
988 
989  napi_enable(&np->napi);
990 
991  netif_start_queue(dev);
992 
993  if (debug > 1)
994  printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
995  set_rx_mode(dev);
996 
997  np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
998  check_duplex(dev);
999 
1000  /* Enable GPIO interrupts on link change */
1001  writel(0x0f00ff00, ioaddr + GPIOCtrl);
1002 
1003  /* Set the interrupt mask */
1007  ioaddr + IntrEnable);
1008  /* Enable PCI interrupts. */
1009  writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1010  ioaddr + PCIDeviceConfig);
1011 
1012 #ifdef VLAN_SUPPORT
1013  /* Set VLAN type to 802.1q */
1014  writel(ETH_P_8021Q, ioaddr + VlanType);
1015 #endif /* VLAN_SUPPORT */
1016 
1017  retval = request_firmware(&fw_rx, FIRMWARE_RX, &np->pci_dev->dev);
1018  if (retval) {
1019  printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1020  FIRMWARE_RX);
1021  goto out_init;
1022  }
1023  if (fw_rx->size % 4) {
1024  printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1025  fw_rx->size, FIRMWARE_RX);
1026  retval = -EINVAL;
1027  goto out_rx;
1028  }
1029  retval = request_firmware(&fw_tx, FIRMWARE_TX, &np->pci_dev->dev);
1030  if (retval) {
1031  printk(KERN_ERR "starfire: Failed to load firmware \"%s\"\n",
1032  FIRMWARE_TX);
1033  goto out_rx;
1034  }
1035  if (fw_tx->size % 4) {
1036  printk(KERN_ERR "starfire: bogus length %zu in \"%s\"\n",
1037  fw_tx->size, FIRMWARE_TX);
1038  retval = -EINVAL;
1039  goto out_tx;
1040  }
1041  fw_rx_data = (const __be32 *)&fw_rx->data[0];
1042  fw_tx_data = (const __be32 *)&fw_tx->data[0];
1043  rx_size = fw_rx->size / 4;
1044  tx_size = fw_tx->size / 4;
1045 
1046  /* Load Rx/Tx firmware into the frame processors */
1047  for (i = 0; i < rx_size; i++)
1048  writel(be32_to_cpup(&fw_rx_data[i]), ioaddr + RxGfpMem + i * 4);
1049  for (i = 0; i < tx_size; i++)
1050  writel(be32_to_cpup(&fw_tx_data[i]), ioaddr + TxGfpMem + i * 4);
1051  if (enable_hw_cksum)
1052  /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1054  else
1055  /* Enable the Rx and Tx units only. */
1056  writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1057 
1058  if (debug > 1)
1059  printk(KERN_DEBUG "%s: Done netdev_open().\n",
1060  dev->name);
1061 
1062 out_tx:
1063  release_firmware(fw_tx);
1064 out_rx:
1065  release_firmware(fw_rx);
1066 out_init:
1067  if (retval)
1068  netdev_close(dev);
1069  return retval;
1070 }
1071 
1072 
1073 static void check_duplex(struct net_device *dev)
1074 {
1075  struct netdev_private *np = netdev_priv(dev);
1076  u16 reg0;
1077  int silly_count = 1000;
1078 
1079  mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1080  mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
1081  udelay(500);
1082  while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1083  /* do nothing */;
1084  if (!silly_count) {
1085  printk("%s: MII reset failed!\n", dev->name);
1086  return;
1087  }
1088 
1089  reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1090 
1091  if (!np->mii_if.force_media) {
1092  reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1093  } else {
1094  reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1095  if (np->speed100)
1096  reg0 |= BMCR_SPEED100;
1097  if (np->mii_if.full_duplex)
1098  reg0 |= BMCR_FULLDPLX;
1099  printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1100  dev->name,
1101  np->speed100 ? "100" : "10",
1102  np->mii_if.full_duplex ? "full" : "half");
1103  }
1104  mdio_write(dev, np->phys[0], MII_BMCR, reg0);
1105 }
1106 
1107 
1108 static void tx_timeout(struct net_device *dev)
1109 {
1110  struct netdev_private *np = netdev_priv(dev);
1111  void __iomem *ioaddr = np->base;
1112  int old_debug;
1113 
1114  printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1115  "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1116 
1117  /* Perhaps we should reinitialize the hardware here. */
1118 
1119  /*
1120  * Stop and restart the interface.
1121  * Cheat and increase the debug level temporarily.
1122  */
1123  old_debug = debug;
1124  debug = 2;
1125  netdev_close(dev);
1126  netdev_open(dev);
1127  debug = old_debug;
1128 
1129  /* Trigger an immediate transmit demand. */
1130 
1131  dev->trans_start = jiffies; /* prevent tx timeout */
1132  dev->stats.tx_errors++;
1133  netif_wake_queue(dev);
1134 }
1135 
1136 
1137 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1138 static void init_ring(struct net_device *dev)
1139 {
1140  struct netdev_private *np = netdev_priv(dev);
1141  int i;
1142 
1143  np->cur_rx = np->cur_tx = np->reap_tx = 0;
1144  np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
1145 
1146  np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1147 
1148  /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1149  for (i = 0; i < RX_RING_SIZE; i++) {
1150  struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1151  np->rx_info[i].skb = skb;
1152  if (skb == NULL)
1153  break;
1154  np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155  /* Grrr, we cannot offset to correctly align the IP header. */
1156  np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
1157  }
1158  writew(i - 1, np->base + RxDescQIdx);
1159  np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1160 
1161  /* Clear the remainder of the Rx buffer ring. */
1162  for ( ; i < RX_RING_SIZE; i++) {
1163  np->rx_ring[i].rxaddr = 0;
1164  np->rx_info[i].skb = NULL;
1165  np->rx_info[i].mapping = 0;
1166  }
1167  /* Mark the last entry as wrapping the ring. */
1168  np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1169 
1170  /* Clear the completion rings. */
1171  for (i = 0; i < DONE_Q_SIZE; i++) {
1172  np->rx_done_q[i].status = 0;
1173  np->tx_done_q[i].status = 0;
1174  }
1175 
1176  for (i = 0; i < TX_RING_SIZE; i++)
1177  memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1178 }
1179 
1180 
1181 static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1182 {
1183  struct netdev_private *np = netdev_priv(dev);
1184  unsigned int entry;
1185  u32 status;
1186  int i;
1187 
1188  /*
1189  * be cautious here, wrapping the queue has weird semantics
1190  * and we may not have enough slots even when it seems we do.
1191  */
1192  if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1193  netif_stop_queue(dev);
1194  return NETDEV_TX_BUSY;
1195  }
1196 
1197 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
1198  if (skb->ip_summed == CHECKSUM_PARTIAL) {
1199  if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
1200  return NETDEV_TX_OK;
1201  }
1202 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1203 
1204  entry = np->cur_tx % TX_RING_SIZE;
1205  for (i = 0; i < skb_num_frags(skb); i++) {
1206  int wrap_ring = 0;
1207  status = TxDescID;
1208 
1209  if (i == 0) {
1210  np->tx_info[entry].skb = skb;
1211  status |= TxCRCEn;
1212  if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1213  status |= TxRingWrap;
1214  wrap_ring = 1;
1215  }
1216  if (np->reap_tx) {
1217  status |= TxDescIntr;
1218  np->reap_tx = 0;
1219  }
1220  if (skb->ip_summed == CHECKSUM_PARTIAL) {
1221  status |= TxCalTCP;
1222  dev->stats.tx_compressed++;
1223  }
1224  status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1225 
1226  np->tx_info[entry].mapping =
1227  pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1228  } else {
1229  const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1230  status |= skb_frag_size(this_frag);
1231  np->tx_info[entry].mapping =
1232  pci_map_single(np->pci_dev,
1233  skb_frag_address(this_frag),
1234  skb_frag_size(this_frag),
1236  }
1237 
1238  np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1239  np->tx_ring[entry].status = cpu_to_le32(status);
1240  if (debug > 3)
1241  printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1242  dev->name, np->cur_tx, np->dirty_tx,
1243  entry, status);
1244  if (wrap_ring) {
1245  np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1246  np->cur_tx += np->tx_info[entry].used_slots;
1247  entry = 0;
1248  } else {
1249  np->tx_info[entry].used_slots = 1;
1250  np->cur_tx += np->tx_info[entry].used_slots;
1251  entry++;
1252  }
1253  /* scavenge the tx descriptors twice per TX_RING_SIZE */
1254  if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1255  np->reap_tx = 1;
1256  }
1257 
1258  /* Non-x86: explicitly flush descriptor cache lines here. */
1259  /* Ensure all descriptors are written back before the transmit is
1260  initiated. - Jes */
1261  wmb();
1262 
1263  /* Update the producer index. */
1264  writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
1265 
1266  /* 4 is arbitrary, but should be ok */
1267  if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1268  netif_stop_queue(dev);
1269 
1270  return NETDEV_TX_OK;
1271 }
1272 
1273 
1274 /* The interrupt handler does all of the Rx thread work and cleans up
1275  after the Tx thread. */
1276 static irqreturn_t intr_handler(int irq, void *dev_instance)
1277 {
1278  struct net_device *dev = dev_instance;
1279  struct netdev_private *np = netdev_priv(dev);
1280  void __iomem *ioaddr = np->base;
1281  int boguscnt = max_interrupt_work;
1282  int consumer;
1283  int tx_status;
1284  int handled = 0;
1285 
1286  do {
1287  u32 intr_status = readl(ioaddr + IntrClear);
1288 
1289  if (debug > 4)
1290  printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1291  dev->name, intr_status);
1292 
1293  if (intr_status == 0 || intr_status == (u32) -1)
1294  break;
1295 
1296  handled = 1;
1297 
1298  if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1299  u32 enable;
1300 
1301  if (likely(napi_schedule_prep(&np->napi))) {
1302  __napi_schedule(&np->napi);
1303  enable = readl(ioaddr + IntrEnable);
1304  enable &= ~(IntrRxDone | IntrRxEmpty);
1305  writel(enable, ioaddr + IntrEnable);
1306  /* flush PCI posting buffers */
1307  readl(ioaddr + IntrEnable);
1308  } else {
1309  /* Paranoia check */
1310  enable = readl(ioaddr + IntrEnable);
1311  if (enable & (IntrRxDone | IntrRxEmpty)) {
1313  "%s: interrupt while in poll!\n",
1314  dev->name);
1315  enable &= ~(IntrRxDone | IntrRxEmpty);
1316  writel(enable, ioaddr + IntrEnable);
1317  }
1318  }
1319  }
1320 
1321  /* Scavenge the skbuff list based on the Tx-done queue.
1322  There are redundant checks here that may be cleaned up
1323  after the driver has proven to be reliable. */
1324  consumer = readl(ioaddr + TxConsumerIdx);
1325  if (debug > 3)
1326  printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1327  dev->name, consumer);
1328 
1329  while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1330  if (debug > 3)
1331  printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1332  dev->name, np->dirty_tx, np->tx_done, tx_status);
1333  if ((tx_status & 0xe0000000) == 0xa0000000) {
1334  dev->stats.tx_packets++;
1335  } else if ((tx_status & 0xe0000000) == 0x80000000) {
1336  u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1337  struct sk_buff *skb = np->tx_info[entry].skb;
1338  np->tx_info[entry].skb = NULL;
1339  pci_unmap_single(np->pci_dev,
1340  np->tx_info[entry].mapping,
1341  skb_first_frag_len(skb),
1343  np->tx_info[entry].mapping = 0;
1344  np->dirty_tx += np->tx_info[entry].used_slots;
1345  entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1346  {
1347  int i;
1348  for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1349  pci_unmap_single(np->pci_dev,
1350  np->tx_info[entry].mapping,
1351  skb_frag_size(&skb_shinfo(skb)->frags[i]),
1353  np->dirty_tx++;
1354  entry++;
1355  }
1356  }
1357 
1358  dev_kfree_skb_irq(skb);
1359  }
1360  np->tx_done_q[np->tx_done].status = 0;
1361  np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1362  }
1363  writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1364 
1365  if (netif_queue_stopped(dev) &&
1366  (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1367  /* The ring is no longer full, wake the queue. */
1368  netif_wake_queue(dev);
1369  }
1370 
1371  /* Stats overflow */
1372  if (intr_status & IntrStatsMax)
1373  get_stats(dev);
1374 
1375  /* Media change interrupt. */
1376  if (intr_status & IntrLinkChange)
1377  netdev_media_change(dev);
1378 
1379  /* Abnormal error summary/uncommon events handlers. */
1380  if (intr_status & IntrAbnormalSummary)
1381  netdev_error(dev, intr_status);
1382 
1383  if (--boguscnt < 0) {
1384  if (debug > 1)
1385  printk(KERN_WARNING "%s: Too much work at interrupt, "
1386  "status=%#8.8x.\n",
1387  dev->name, intr_status);
1388  break;
1389  }
1390  } while (1);
1391 
1392  if (debug > 4)
1393  printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1394  dev->name, (int) readl(ioaddr + IntrStatus));
1395  return IRQ_RETVAL(handled);
1396 }
1397 
1398 
1399 /*
1400  * This routine is logically part of the interrupt/poll handler, but separated
1401  * for clarity and better register allocation.
1402  */
1403 static int __netdev_rx(struct net_device *dev, int *quota)
1404 {
1405  struct netdev_private *np = netdev_priv(dev);
1406  u32 desc_status;
1407  int retcode = 0;
1408 
1409  /* If EOP is set on the next entry, it's a new packet. Send it up. */
1410  while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1411  struct sk_buff *skb;
1412  u16 pkt_len;
1413  int entry;
1414  rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1415 
1416  if (debug > 4)
1417  printk(KERN_DEBUG " netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1418  if (!(desc_status & RxOK)) {
1419  /* There was an error. */
1420  if (debug > 2)
1421  printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1422  dev->stats.rx_errors++;
1423  if (desc_status & RxFIFOErr)
1424  dev->stats.rx_fifo_errors++;
1425  goto next_rx;
1426  }
1427 
1428  if (*quota <= 0) { /* out of rx quota */
1429  retcode = 1;
1430  goto out;
1431  }
1432  (*quota)--;
1433 
1434  pkt_len = desc_status; /* Implicitly Truncate */
1435  entry = (desc_status >> 16) & 0x7ff;
1436 
1437  if (debug > 4)
1438  printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1439  /* Check if the packet is long enough to accept without copying
1440  to a minimally-sized skbuff. */
1441  if (pkt_len < rx_copybreak &&
1442  (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1443  skb_reserve(skb, 2); /* 16 byte align the IP header */
1444  pci_dma_sync_single_for_cpu(np->pci_dev,
1445  np->rx_info[entry].mapping,
1446  pkt_len, PCI_DMA_FROMDEVICE);
1447  skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
1448  pci_dma_sync_single_for_device(np->pci_dev,
1449  np->rx_info[entry].mapping,
1450  pkt_len, PCI_DMA_FROMDEVICE);
1451  skb_put(skb, pkt_len);
1452  } else {
1453  pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1454  skb = np->rx_info[entry].skb;
1455  skb_put(skb, pkt_len);
1456  np->rx_info[entry].skb = NULL;
1457  np->rx_info[entry].mapping = 0;
1458  }
1459 #ifndef final_version /* Remove after testing. */
1460  /* You will want this info for the initial debug. */
1461  if (debug > 5) {
1462  printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1463  skb->data, skb->data + 6,
1464  skb->data[12], skb->data[13]);
1465  }
1466 #endif
1467 
1468  skb->protocol = eth_type_trans(skb, dev);
1469 #ifdef VLAN_SUPPORT
1470  if (debug > 4)
1471  printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1472 #endif
1473  if (le16_to_cpu(desc->status2) & 0x0100) {
1475  dev->stats.rx_compressed++;
1476  }
1477  /*
1478  * This feature doesn't seem to be working, at least
1479  * with the two firmware versions I have. If the GFP sees
1480  * an IP fragment, it either ignores it completely, or reports
1481  * "bad checksum" on it.
1482  *
1483  * Maybe I missed something -- corrections are welcome.
1484  * Until then, the printk stays. :-) -Ion
1485  */
1486  else if (le16_to_cpu(desc->status2) & 0x0040) {
1488  skb->csum = le16_to_cpu(desc->csum);
1489  printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1490  }
1491 #ifdef VLAN_SUPPORT
1492  if (le16_to_cpu(desc->status2) & 0x0200) {
1493  u16 vlid = le16_to_cpu(desc->vlanid);
1494 
1495  if (debug > 4) {
1496  printk(KERN_DEBUG " netdev_rx() vlanid = %d\n",
1497  vlid);
1498  }
1499  __vlan_hwaccel_put_tag(skb, vlid);
1500  }
1501 #endif /* VLAN_SUPPORT */
1502  netif_receive_skb(skb);
1503  dev->stats.rx_packets++;
1504 
1505  next_rx:
1506  np->cur_rx++;
1507  desc->status = 0;
1508  np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1509  }
1510 
1511  if (*quota == 0) { /* out of rx quota */
1512  retcode = 1;
1513  goto out;
1514  }
1516 
1517  out:
1518  refill_rx_ring(dev);
1519  if (debug > 5)
1520  printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1521  retcode, np->rx_done, desc_status);
1522  return retcode;
1523 }
1524 
1525 static int netdev_poll(struct napi_struct *napi, int budget)
1526 {
1527  struct netdev_private *np = container_of(napi, struct netdev_private, napi);
1528  struct net_device *dev = np->dev;
1529  u32 intr_status;
1530  void __iomem *ioaddr = np->base;
1531  int quota = budget;
1532 
1533  do {
1534  writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1535 
1536  if (__netdev_rx(dev, &quota))
1537  goto out;
1538 
1539  intr_status = readl(ioaddr + IntrStatus);
1540  } while (intr_status & (IntrRxDone | IntrRxEmpty));
1541 
1542  napi_complete(napi);
1543  intr_status = readl(ioaddr + IntrEnable);
1544  intr_status |= IntrRxDone | IntrRxEmpty;
1545  writel(intr_status, ioaddr + IntrEnable);
1546 
1547  out:
1548  if (debug > 5)
1549  printk(KERN_DEBUG " exiting netdev_poll(): %d.\n",
1550  budget - quota);
1551 
1552  /* Restart Rx engine if stopped. */
1553  return budget - quota;
1554 }
1555 
1556 static void refill_rx_ring(struct net_device *dev)
1557 {
1558  struct netdev_private *np = netdev_priv(dev);
1559  struct sk_buff *skb;
1560  int entry = -1;
1561 
1562  /* Refill the Rx ring buffers. */
1563  for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1564  entry = np->dirty_rx % RX_RING_SIZE;
1565  if (np->rx_info[entry].skb == NULL) {
1566  skb = netdev_alloc_skb(dev, np->rx_buf_sz);
1567  np->rx_info[entry].skb = skb;
1568  if (skb == NULL)
1569  break; /* Better luck next round. */
1570  np->rx_info[entry].mapping =
1571  pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1572  np->rx_ring[entry].rxaddr =
1573  cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1574  }
1575  if (entry == RX_RING_SIZE - 1)
1576  np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1577  }
1578  if (entry >= 0)
1579  writew(entry, np->base + RxDescQIdx);
1580 }
1581 
1582 
1583 static void netdev_media_change(struct net_device *dev)
1584 {
1585  struct netdev_private *np = netdev_priv(dev);
1586  void __iomem *ioaddr = np->base;
1587  u16 reg0, reg1, reg4, reg5;
1588  u32 new_tx_mode;
1589  u32 new_intr_timer_ctrl;
1590 
1591  /* reset status first */
1592  mdio_read(dev, np->phys[0], MII_BMCR);
1593  mdio_read(dev, np->phys[0], MII_BMSR);
1594 
1595  reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1596  reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1597 
1598  if (reg1 & BMSR_LSTATUS) {
1599  /* link is up */
1600  if (reg0 & BMCR_ANENABLE) {
1601  /* autonegotiation is enabled */
1602  reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1603  reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1604  if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1605  np->speed100 = 1;
1606  np->mii_if.full_duplex = 1;
1607  } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1608  np->speed100 = 1;
1609  np->mii_if.full_duplex = 0;
1610  } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1611  np->speed100 = 0;
1612  np->mii_if.full_duplex = 1;
1613  } else {
1614  np->speed100 = 0;
1615  np->mii_if.full_duplex = 0;
1616  }
1617  } else {
1618  /* autonegotiation is disabled */
1619  if (reg0 & BMCR_SPEED100)
1620  np->speed100 = 1;
1621  else
1622  np->speed100 = 0;
1623  if (reg0 & BMCR_FULLDPLX)
1624  np->mii_if.full_duplex = 1;
1625  else
1626  np->mii_if.full_duplex = 0;
1627  }
1628  netif_carrier_on(dev);
1629  printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1630  dev->name,
1631  np->speed100 ? "100" : "10",
1632  np->mii_if.full_duplex ? "full" : "half");
1633 
1634  new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1635  if (np->mii_if.full_duplex)
1636  new_tx_mode |= FullDuplex;
1637  if (np->tx_mode != new_tx_mode) {
1638  np->tx_mode = new_tx_mode;
1639  writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1640  udelay(1000);
1641  writel(np->tx_mode, ioaddr + TxMode);
1642  }
1643 
1644  new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1645  if (np->speed100)
1646  new_intr_timer_ctrl |= Timer10X;
1647  if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1648  np->intr_timer_ctrl = new_intr_timer_ctrl;
1649  writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1650  }
1651  } else {
1652  netif_carrier_off(dev);
1653  printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1654  }
1655 }
1656 
1657 
1658 static void netdev_error(struct net_device *dev, int intr_status)
1659 {
1660  struct netdev_private *np = netdev_priv(dev);
1661 
1662  /* Came close to underrunning the Tx FIFO, increase threshold. */
1663  if (intr_status & IntrTxDataLow) {
1664  if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1665  writel(++np->tx_threshold, np->base + TxThreshold);
1666  printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1667  dev->name, np->tx_threshold * 16);
1668  } else
1669  printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1670  }
1671  if (intr_status & IntrRxGFPDead) {
1672  dev->stats.rx_fifo_errors++;
1673  dev->stats.rx_errors++;
1674  }
1675  if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1676  dev->stats.tx_fifo_errors++;
1677  dev->stats.tx_errors++;
1678  }
1679  if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1680  printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1681  dev->name, intr_status);
1682 }
1683 
1684 
1685 static struct net_device_stats *get_stats(struct net_device *dev)
1686 {
1687  struct netdev_private *np = netdev_priv(dev);
1688  void __iomem *ioaddr = np->base;
1689 
1690  /* This adapter architecture needs no SMP locks. */
1691  dev->stats.tx_bytes = readl(ioaddr + 0x57010);
1692  dev->stats.rx_bytes = readl(ioaddr + 0x57044);
1693  dev->stats.tx_packets = readl(ioaddr + 0x57000);
1694  dev->stats.tx_aborted_errors =
1695  readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1696  dev->stats.tx_window_errors = readl(ioaddr + 0x57018);
1697  dev->stats.collisions =
1698  readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1699 
1700  /* The chip only needs to report frames silently dropped. */
1701  dev->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1702  writew(0, ioaddr + RxDMAStatus);
1703  dev->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1704  dev->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1705  dev->stats.rx_length_errors = readl(ioaddr + 0x57058);
1706  dev->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1707 
1708  return &dev->stats;
1709 }
1710 
1711 #ifdef VLAN_SUPPORT
1712 static u32 set_vlan_mode(struct netdev_private *np)
1713 {
1714  u32 ret = VlanMode;
1715  u16 vid;
1716  void __iomem *filter_addr = np->base + HashTable + 8;
1717  int vlan_count = 0;
1718 
1719  for_each_set_bit(vid, np->active_vlans, VLAN_N_VID) {
1720  if (vlan_count == 32)
1721  break;
1722  writew(vid, filter_addr);
1723  filter_addr += 16;
1724  vlan_count++;
1725  }
1726  if (vlan_count == 32) {
1727  ret |= PerfectFilterVlan;
1728  while (vlan_count < 32) {
1729  writew(0, filter_addr);
1730  filter_addr += 16;
1731  vlan_count++;
1732  }
1733  }
1734  return ret;
1735 }
1736 #endif /* VLAN_SUPPORT */
1737 
1738 static void set_rx_mode(struct net_device *dev)
1739 {
1740  struct netdev_private *np = netdev_priv(dev);
1741  void __iomem *ioaddr = np->base;
1742  u32 rx_mode = MinVLANPrio;
1743  struct netdev_hw_addr *ha;
1744  int i;
1745 
1746 #ifdef VLAN_SUPPORT
1747  rx_mode |= set_vlan_mode(np);
1748 #endif /* VLAN_SUPPORT */
1749 
1750  if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1751  rx_mode |= AcceptAll;
1752  } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1753  (dev->flags & IFF_ALLMULTI)) {
1754  /* Too many to match, or accept all multicasts. */
1756  } else if (netdev_mc_count(dev) <= 14) {
1757  /* Use the 16 element perfect filter, skip first two entries. */
1758  void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1759  __be16 *eaddrs;
1760  netdev_for_each_mc_addr(ha, dev) {
1761  eaddrs = (__be16 *) ha->addr;
1762  writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1763  writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1764  writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
1765  }
1766  eaddrs = (__be16 *)dev->dev_addr;
1767  i = netdev_mc_count(dev) + 2;
1768  while (i++ < 16) {
1769  writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1770  writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1771  writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1772  }
1773  rx_mode |= AcceptBroadcast|PerfectFilter;
1774  } else {
1775  /* Must use a multicast hash table. */
1776  void __iomem *filter_addr;
1777  __be16 *eaddrs;
1778  __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1779 
1780  memset(mc_filter, 0, sizeof(mc_filter));
1781  netdev_for_each_mc_addr(ha, dev) {
1782  /* The chip uses the upper 9 CRC bits
1783  as index into the hash table */
1784  int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1785  __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1786 
1787  *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1788  }
1789  /* Clear the perfect filter list, skip first two entries. */
1790  filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1791  eaddrs = (__be16 *)dev->dev_addr;
1792  for (i = 2; i < 16; i++) {
1793  writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
1794  writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1795  writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 8;
1796  }
1797  for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1798  writew(mc_filter[i], filter_addr);
1799  rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1800  }
1801  writel(rx_mode, ioaddr + RxFilterMode);
1802 }
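
The hash branch above is the subtle part of set_rx_mode(): the filter is 512 bits wide and is indexed by the top 9 bits of the little-endian Ethernet CRC of the destination MAC address, and the chosen bit then lives inside one 32-bit word of the mc_filter array. The following standalone sketch walks through the same computation; crc32_le_bitwise() is written here to follow the same convention as the kernel's ether_crc_le() (reflected CRC-32, initial value 0xffffffff, no final inversion), and the MAC value is an arbitrary example:

        #include <stdio.h>
        #include <stdint.h>

        /* Reflected CRC-32 over the address bytes, same convention as
         * ether_crc_le(): start at 0xffffffff, no final inversion. */
        static uint32_t crc32_le_bitwise(const uint8_t *p, int len)
        {
                uint32_t crc = 0xffffffff;
                int i, bit;

                for (i = 0; i < len; i++) {
                        crc ^= p[i];
                        for (bit = 0; bit < 8; bit++)
                                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
                }
                return crc;
        }

        int main(void)
        {
                uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };  /* example group address */
                uint32_t bit_nr = crc32_le_bitwise(mac, 6) >> 23;         /* upper 9 CRC bits: 0..511 */

                /* Same indexing as the driver: 32-bit word starting at the
                 * even 16-bit index (bit_nr >> 4) & ~1, bit (bit_nr & 31). */
                printf("hash word %u, bit %u\n",
                       (unsigned)((bit_nr >> 4) & ~1u), (unsigned)(bit_nr & 31));
                return 0;
        }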
1803 
1804 static int check_if_running(struct net_device *dev)
1805 {
1806  if (!netif_running(dev))
1807  return -EINVAL;
1808  return 0;
1809 }
1810 
1811 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1812 {
1813  struct netdev_private *np = netdev_priv(dev);
1814  strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1815  strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1816  strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1817 }
1818 
1819 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1820 {
1821  struct netdev_private *np = netdev_priv(dev);
1822  spin_lock_irq(&np->lock);
1823  mii_ethtool_gset(&np->mii_if, ecmd);
1824  spin_unlock_irq(&np->lock);
1825  return 0;
1826 }
1827 
1828 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1829 {
1830  struct netdev_private *np = netdev_priv(dev);
1831  int res;
1832  spin_lock_irq(&np->lock);
1833  res = mii_ethtool_sset(&np->mii_if, ecmd);
1834  spin_unlock_irq(&np->lock);
1835  check_duplex(dev);
1836  return res;
1837 }
1838 
1839 static int nway_reset(struct net_device *dev)
1840 {
1841  struct netdev_private *np = netdev_priv(dev);
1842  return mii_nway_restart(&np->mii_if);
1843 }
1844 
1845 static u32 get_link(struct net_device *dev)
1846 {
1847  struct netdev_private *np = netdev_priv(dev);
1848  return mii_link_ok(&np->mii_if);
1849 }
1850 
1851 static u32 get_msglevel(struct net_device *dev)
1852 {
1853  return debug;
1854 }
1855 
1856 static void set_msglevel(struct net_device *dev, u32 val)
1857 {
1858  debug = val;
1859 }
1860 
1861 static const struct ethtool_ops ethtool_ops = {
1862  .begin = check_if_running,
1863  .get_drvinfo = get_drvinfo,
1864  .get_settings = get_settings,
1865  .set_settings = set_settings,
1866  .nway_reset = nway_reset,
1867  .get_link = get_link,
1868  .get_msglevel = get_msglevel,
1869  .set_msglevel = set_msglevel,
1870 };
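
The ops table does nothing on its own until it is attached to the net_device; in this driver that happens in the PCI probe path, which is not shown in this excerpt. A minimal sketch of the usual attachment pattern, where my_probe() is a hypothetical probe function rather than this driver's real one:

        /* Illustrative sketch: hook the ethtool_ops table up before
         * register_netdev() so ethtool can reach the handlers above. */
        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        {
                struct net_device *dev = alloc_etherdev(sizeof(struct netdev_private));

                if (!dev)
                        return -ENOMEM;
                SET_ETHTOOL_OPS(dev, &ethtool_ops);  /* i.e. dev->ethtool_ops = &ethtool_ops */
                /* ... BAR mapping, netdev_ops and MAC address setup would go here ... */
                return register_netdev(dev);
        }

From userspace, "ethtool -i ethX" then reaches get_drvinfo() and a plain "ethtool ethX" reaches get_settings(), both gated by check_if_running() since it is wired up as .begin.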
1871 
1872 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1873 {
1874  struct netdev_private *np = netdev_priv(dev);
1875  struct mii_ioctl_data *data = if_mii(rq);
1876  int rc;
1877 
1878  if (!netif_running(dev))
1879  return -EINVAL;
1880 
1881  spin_lock_irq(&np->lock);
1882  rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1883  spin_unlock_irq(&np->lock);
1884 
1885  if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
1886  check_duplex(dev);
1887 
1888  return rc;
1889 }
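
Because generic_mii_ioctl() implements only the standard MII commands and the handler bails out while the interface is down, the simplest way to exercise this path from userspace is the classic SIOCGMIIPHY/SIOCGMIIREG pair. A small illustrative program (not part of the driver; "eth0" is an example name and the interface must be up):

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/mii.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct ifreq ifr;
                struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                if (fd < 0)
                        return 1;
                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

                if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {   /* fills mii->phy_id */
                        mii->reg_num = MII_BMSR;           /* basic mode status register */
                        if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                                printf("PHY %d BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
                }
                close(fd);
                return 0;
        }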
1890 
1891 static int netdev_close(struct net_device *dev)
1892 {
1893  struct netdev_private *np = netdev_priv(dev);
1894  void __iomem *ioaddr = np->base;
1895  int i;
1896 
1897  netif_stop_queue(dev);
1898 
1899  napi_disable(&np->napi);
1900 
1901  if (debug > 1) {
1902  printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
1903  dev->name, (int) readl(ioaddr + IntrStatus));
1904  printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1905  dev->name, np->cur_tx, np->dirty_tx,
1906  np->cur_rx, np->dirty_rx);
1907  }
1908 
1909  /* Disable interrupts by clearing the interrupt mask. */
1910  writel(0, ioaddr + IntrEnable);
1911 
1912  /* Stop the chip's Tx and Rx processes. */
1913  writel(0, ioaddr + GenCtrl);
1914  readl(ioaddr + GenCtrl);
1915 
1916  if (debug > 5) {
1917  printk(KERN_DEBUG" Tx ring at %#llx:\n",
1918  (long long) np->tx_ring_dma);
1919  for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1920  printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
1921  i, le32_to_cpu(np->tx_ring[i].status),
1922  (long long) dma_to_cpu(np->tx_ring[i].addr),
1923  le32_to_cpu(np->tx_done_q[i].status));
1924  printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
1925  (long long) np->rx_ring_dma, np->rx_done_q);
1926  if (np->rx_done_q)
1927  for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1928  printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
1929  i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1930  }
1931  }
1932 
1933  free_irq(np->pci_dev->irq, dev);
1934 
1935  /* Free all the skbuffs in the Rx queue. */
1936  for (i = 0; i < RX_RING_SIZE; i++) {
1937  np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
1938  if (np->rx_info[i].skb != NULL) {
1939  pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1940  dev_kfree_skb(np->rx_info[i].skb);
1941  }
1942  np->rx_info[i].skb = NULL;
1943  np->rx_info[i].mapping = 0;
1944  }
1945  for (i = 0; i < TX_RING_SIZE; i++) {
1946  struct sk_buff *skb = np->tx_info[i].skb;
1947  if (skb == NULL)
1948  continue;
1949  pci_unmap_single(np->pci_dev,
1950  np->tx_info[i].mapping,
1951  skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1952  np->tx_info[i].mapping = 0;
1953  dev_kfree_skb(skb);
1954  np->tx_info[i].skb = NULL;
1955  }
1956 
1957  return 0;
1958 }
1959 
1960 #ifdef CONFIG_PM
1961 static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
1962 {
1963  struct net_device *dev = pci_get_drvdata(pdev);
1964 
1965  if (netif_running(dev)) {
1966  netif_device_detach(dev);
1967  netdev_close(dev);
1968  }
1969 
1970  pci_save_state(pdev);
1971  pci_set_power_state(pdev, pci_choose_state(pdev, state));
1972 
1973  return 0;
1974 }
1975 
1976 static int starfire_resume(struct pci_dev *pdev)
1977 {
1978  struct net_device *dev = pci_get_drvdata(pdev);
1979 
1980  pci_set_power_state(pdev, PCI_D0);
1981  pci_restore_state(pdev);
1982 
1983  if (netif_running(dev)) {
1984  netdev_open(dev);
1985  netif_device_attach(dev);
1986  }
1987 
1988  return 0;
1989 }
1990 #endif /* CONFIG_PM */
1991 
1992 
1993 static void __devexit starfire_remove_one (struct pci_dev *pdev)
1994 {
1995  struct net_device *dev = pci_get_drvdata(pdev);
1996  struct netdev_private *np = netdev_priv(dev);
1997 
1998  BUG_ON(!dev);
1999 
2000  unregister_netdev(dev);
2001 
2002  if (np->queue_mem)
2003  pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2004 
2005 
2006  /* XXX: add wakeup code -- requires firmware for MagicPacket */
2007  pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2008  pci_disable_device(pdev);
2009 
2010  iounmap(np->base);
2011  pci_release_regions(pdev);
2012 
2013  pci_set_drvdata(pdev, NULL);
2014  free_netdev(dev); /* Will also free np!! */
2015 }
2016 
2017 
2018 static struct pci_driver starfire_driver = {
2019  .name = DRV_NAME,
2020  .probe = starfire_init_one,
2021  .remove = __devexit_p(starfire_remove_one),
2022 #ifdef CONFIG_PM
2023  .suspend = starfire_suspend,
2024  .resume = starfire_resume,
2025 #endif /* CONFIG_PM */
2026  .id_table = starfire_pci_tbl,
2027 };
2028 
2029 
2030 static int __init starfire_init (void)
2031 {
2032 /* When built as a module, this is printed whether or not devices are found in probe */
2033 #ifdef MODULE
2034  printk(version);
2035 
2036  printk(KERN_INFO DRV_NAME ": polling (NAPI) enabled\n");
2037 #endif
2038 
2039  BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(netdrv_addr_t));
2040 
2041  return pci_register_driver(&starfire_driver);
2042 }
2043 
2044 
2045 static void __exit starfire_cleanup (void)
2046 {
2047  pci_unregister_driver (&starfire_driver);
2048 }
2049 
2050 
2051 module_init(starfire_init);
2052 module_exit(starfire_cleanup);
2053 
2054 
2055 /*
2056  * Local variables:
2057  * c-basic-offset: 8
2058  * tab-width: 8
2059  * End:
2060  */