Linux Kernel 3.7.1
smc91x.c
1 /*
2  * smc91x.c
3  * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices.
4  *
5  * Copyright (C) 1996 by Erik Stahlman
6  * Copyright (C) 2001 Standard Microsystems Corporation
7  * Developed by Simple Network Magic Corporation
8  * Copyright (C) 2003 Monta Vista Software, Inc.
9  * Unified SMC91x driver by Nicolas Pitre
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24  *
25  * Arguments:
26  * io = for the base address
27  * irq = for the IRQ
28  * nowait = 0 for normal wait states, 1 eliminates additional wait states
29  *
30  * original author:
31  * Erik Stahlman <[email protected]>
32  *
33  * hardware multicast code:
34  * Peter Cammaert <[email protected]>
35  *
36  * contributors:
37  * Daris A Nevil <[email protected]>
38  * Nicolas Pitre <[email protected]>
39  * Russell King <[email protected]>
40  *
41  * History:
42  * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet
43  * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ"
44  * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111
45  * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111
46  * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111
47  * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support
48  * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races,
49  * more bus abstraction, big cleanup, etc.
50  * 29/09/03 Russell King - add driver model support
51  * - ethtool support
52  * - convert to use generic MII interface
53  * - add link up/down notification
54  * - don't try to handle full negotiation in
55  * smc_phy_configure
56  * - clean up (and fix stack overrun) in PHY
57  * MII read/write functions
58  * 22/09/04 Nicolas Pitre big update (see commit log for details)
59  */
60 static const char version[] =
61  "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <[email protected]>\n";
62 
63 /* Debugging level */
64 #ifndef SMC_DEBUG
65 #define SMC_DEBUG 0
66 #endif
67 
68 
69 #include <linux/init.h>
70 #include <linux/module.h>
71 #include <linux/kernel.h>
72 #include <linux/sched.h>
73 #include <linux/delay.h>
74 #include <linux/interrupt.h>
75 #include <linux/irq.h>
76 #include <linux/errno.h>
77 #include <linux/ioport.h>
78 #include <linux/crc32.h>
79 #include <linux/platform_device.h>
80 #include <linux/spinlock.h>
81 #include <linux/ethtool.h>
82 #include <linux/mii.h>
83 #include <linux/workqueue.h>
84 #include <linux/of.h>
85 
86 #include <linux/netdevice.h>
87 #include <linux/etherdevice.h>
88 #include <linux/skbuff.h>
89 
90 #include <asm/io.h>
91 
92 #include "smc91x.h"
93 
94 #ifndef SMC_NOWAIT
95 # define SMC_NOWAIT 0
96 #endif
97 static int nowait = SMC_NOWAIT;
98 module_param(nowait, int, 0400);
99 MODULE_PARM_DESC(nowait, "set to 1 for no wait state");
100 
101 /*
102  * Transmit timeout, default 1000 milliseconds (1 second).
103  */
104 static int watchdog = 1000;
105 module_param(watchdog, int, 0400);
106 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
107 
108 MODULE_LICENSE("GPL");
109 MODULE_ALIAS("platform:smc91x");
110 
111 /*
112  * The internal workings of the driver. If you are changing anything
113  * here with the SMC stuff, you should have the datasheet and know
114  * what you are doing.
115  */
116 #define CARDNAME "smc91x"
117 
118 /*
119  * Use power-down feature of the chip
120  */
121 #define POWER_DOWN 1
122 
123 /*
124  * Wait time for memory to be free. This probably shouldn't be
125  * tuned that much, as waiting for this means nothing else happens
126  * in the system
127  */
128 #define MEMORY_WAIT_TIME 16
129 
130 /*
131  * The maximum number of processing loops allowed for each call to the
132  * IRQ handler.
133  */
134 #define MAX_IRQ_LOOPS 8
135 
136 /*
137  * This selects whether TX packets are sent one by one to the SMC91x internal
138  * memory and throttled until transmission completes. This may prevent
139  * RX overruns a little by keeping much of the memory free for RX packets,
140  * but at the expense of reduced TX throughput and increased IRQ overhead.
141  * Note this is not a cure for a too slow data bus or too high IRQ latency.
142  */
143 #define THROTTLE_TX_PKTS 0
144 
145 /*
146  * The MII clock high/low times. 2x this number gives the MII clock period
147  * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!)
148  */
149 #define MII_DELAY 1
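/*
 * Rough timing sketch: smc_mii_out()/smc_mii_in() below hold the MII
 * clock low for MII_DELAY us and high for MII_DELAY us, so each bit
 * takes 2 us with the value above. A full register read (32 idle bits
 * + 14 command bits + 18 turnaround/data bits = 64 bits) therefore
 * takes about 128 us, versus the ~6.4 ms per transaction produced by
 * the old value of 50.
 */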
150 
151 #if SMC_DEBUG > 0
152 #define DBG(n, args...) \
153  do { \
154  if (SMC_DEBUG >= (n)) \
155  printk(args); \
156  } while (0)
157 
158 #define PRINTK(args...) printk(args)
159 #else
160 #define DBG(n, args...) do { } while(0)
161 #define PRINTK(args...) printk(KERN_DEBUG args)
162 #endif
163 
164 #if SMC_DEBUG > 3
165 static void PRINT_PKT(u_char *buf, int length)
166 {
167  int i;
168  int remainder;
169  int lines;
170 
171  lines = length / 16;
172  remainder = length % 16;
173 
174  for (i = 0; i < lines ; i ++) {
175  int cur;
176  for (cur = 0; cur < 8; cur++) {
177  u_char a, b;
178  a = *buf++;
179  b = *buf++;
180  printk("%02x%02x ", a, b);
181  }
182  printk("\n");
183  }
184  for (i = 0; i < remainder/2 ; i++) {
185  u_char a, b;
186  a = *buf++;
187  b = *buf++;
188  printk("%02x%02x ", a, b);
189  }
190  printk("\n");
191 }
192 #else
193 #define PRINT_PKT(x...) do { } while(0)
194 #endif
195 
196 
197 /* this enables an interrupt in the interrupt mask register */
198 #define SMC_ENABLE_INT(lp, x) do { \
199  unsigned char mask; \
200  unsigned long smc_enable_flags; \
201  spin_lock_irqsave(&lp->lock, smc_enable_flags); \
202  mask = SMC_GET_INT_MASK(lp); \
203  mask |= (x); \
204  SMC_SET_INT_MASK(lp, mask); \
205  spin_unlock_irqrestore(&lp->lock, smc_enable_flags); \
206 } while (0)
207 
208 /* this disables an interrupt from the interrupt mask register */
209 #define SMC_DISABLE_INT(lp, x) do { \
210  unsigned char mask; \
211  unsigned long smc_disable_flags; \
212  spin_lock_irqsave(&lp->lock, smc_disable_flags); \
213  mask = SMC_GET_INT_MASK(lp); \
214  mask &= ~(x); \
215  SMC_SET_INT_MASK(lp, mask); \
216  spin_unlock_irqrestore(&lp->lock, smc_disable_flags); \
217 } while (0)
218 
219 /*
220  * Wait while MMU is busy. This is usually in the order of a few nanosecs
221  * if at all, but let's avoid deadlocking the system if the hardware
222  * decides to go south.
223  */
224 #define SMC_WAIT_MMU_BUSY(lp) do { \
225  if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \
226  unsigned long timeout = jiffies + 2; \
227  while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
228  if (time_after(jiffies, timeout)) { \
229  printk("%s: timeout %s line %d\n", \
230  dev->name, __FILE__, __LINE__); \
231  break; \
232  } \
233  cpu_relax(); \
234  } \
235  } \
236 } while (0)
237 
238 
239 /*
240  * this does a soft reset on the device
241  */
242 static void smc_reset(struct net_device *dev)
243 {
244  struct smc_local *lp = netdev_priv(dev);
245  void __iomem *ioaddr = lp->base;
246  unsigned int ctl, cfg;
247  struct sk_buff *pending_skb;
248 
249  DBG(2, "%s: %s\n", dev->name, __func__);
250 
251  /* Disable all interrupts, block TX tasklet */
252  spin_lock_irq(&lp->lock);
253  SMC_SELECT_BANK(lp, 2);
254  SMC_SET_INT_MASK(lp, 0);
255  pending_skb = lp->pending_tx_skb;
256  lp->pending_tx_skb = NULL;
257  spin_unlock_irq(&lp->lock);
258 
259  /* free any pending tx skb */
260  if (pending_skb) {
261  dev_kfree_skb(pending_skb);
262  dev->stats.tx_errors++;
263  dev->stats.tx_aborted_errors++;
264  }
265 
266  /*
267  * This resets the registers mostly to defaults, but doesn't
268  * affect EEPROM. That seems unnecessary
269  */
270  SMC_SELECT_BANK(lp, 0);
271  SMC_SET_RCR(lp, RCR_SOFTRST);
272 
273  /*
274  * Setup the Configuration Register
275  * This is necessary because the CONFIG_REG is not affected
276  * by a soft reset
277  */
278  SMC_SELECT_BANK(lp, 1);
279 
280  cfg = CONFIG_DEFAULT;
281 
282  /*
283  * Setup for fast accesses if requested. If the card/system
284  * can't handle it then there will be no recovery except for
285  * a hard reset or power cycle
286  */
287  if (lp->cfg.flags & SMC91X_NOWAIT)
288  cfg |= CONFIG_NO_WAIT;
289 
290  /*
291  * Release from possible power-down state
292  * Configuration register is not affected by Soft Reset
293  */
294  cfg |= CONFIG_EPH_POWER_EN;
295 
296  SMC_SET_CONFIG(lp, cfg);
297 
298  /* this should pause enough for the chip to be happy */
299  /*
300  * elaborate? What does the chip _need_? --jgarzik
301  *
302  * This seems to be undocumented, but something the original
303  * driver(s) have always done. Suspect undocumented timing
304  * info/determined empirically. --rmk
305  */
306  udelay(1);
307 
308  /* Disable transmit and receive functionality */
309  SMC_SELECT_BANK(lp, 0);
310  SMC_SET_RCR(lp, RCR_CLEAR);
311  SMC_SET_TCR(lp, TCR_CLEAR);
312 
313  SMC_SELECT_BANK(lp, 1);
314  ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE;
315 
316  /*
317  * Set the control register to automatically release successfully
318  * transmitted packets, to make the best use out of our limited
319  * memory
320  */
321  if(!THROTTLE_TX_PKTS)
322  ctl |= CTL_AUTO_RELEASE;
323  else
324  ctl &= ~CTL_AUTO_RELEASE;
325  SMC_SET_CTL(lp, ctl);
326 
327  /* Reset the MMU */
328  SMC_SELECT_BANK(lp, 2);
329  SMC_SET_MMU_CMD(lp, MC_RESET);
330  SMC_WAIT_MMU_BUSY(lp);
331 }
332 
333 /*
334  * Enable Interrupts, Receive, and Transmit
335  */
336 static void smc_enable(struct net_device *dev)
337 {
338  struct smc_local *lp = netdev_priv(dev);
339  void __iomem *ioaddr = lp->base;
340  int mask;
341 
342  DBG(2, "%s: %s\n", dev->name, __func__);
343 
344  /* see the header file for options in TCR/RCR DEFAULT */
345  SMC_SELECT_BANK(lp, 0);
346  SMC_SET_TCR(lp, lp->tcr_cur_mode);
347  SMC_SET_RCR(lp, lp->rcr_cur_mode);
348 
349  SMC_SELECT_BANK(lp, 1);
350  SMC_SET_MAC_ADDR(lp, dev->dev_addr);
351 
352  /* now, enable interrupts */
353  mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
354  if (lp->version >= (CHIP_91100 << 4))
355  mask |= IM_MDINT;
356  SMC_SELECT_BANK(lp, 2);
357  SMC_SET_INT_MASK(lp, mask);
358 
359  /*
360  * From this point the register bank must _NOT_ be switched away
361  * to something else than bank 2 without proper locking against
362  * races with any tasklet or interrupt handlers until smc_shutdown()
363  * or smc_reset() is called.
364  */
365 }
366 
367 /*
368  * this puts the device in an inactive state
369  */
370 static void smc_shutdown(struct net_device *dev)
371 {
372  struct smc_local *lp = netdev_priv(dev);
373  void __iomem *ioaddr = lp->base;
374  struct sk_buff *pending_skb;
375 
376  DBG(2, "%s: %s\n", CARDNAME, __func__);
377 
378  /* no more interrupts for me */
379  spin_lock_irq(&lp->lock);
380  SMC_SELECT_BANK(lp, 2);
381  SMC_SET_INT_MASK(lp, 0);
382  pending_skb = lp->pending_tx_skb;
383  lp->pending_tx_skb = NULL;
384  spin_unlock_irq(&lp->lock);
385  if (pending_skb)
386  dev_kfree_skb(pending_skb);
387 
388  /* and tell the card to stay away from that nasty outside world */
389  SMC_SELECT_BANK(lp, 0);
390  SMC_SET_RCR(lp, RCR_CLEAR);
391  SMC_SET_TCR(lp, TCR_CLEAR);
392 
393 #ifdef POWER_DOWN
394  /* finally, shut the chip down */
395  SMC_SELECT_BANK(lp, 1);
396  SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN);
397 #endif
398 }
399 
400 /*
401  * This is the procedure to handle the receipt of a packet.
402  */
403 static inline void smc_rcv(struct net_device *dev)
404 {
405  struct smc_local *lp = netdev_priv(dev);
406  void __iomem *ioaddr = lp->base;
407  unsigned int packet_number, status, packet_len;
408 
409  DBG(3, "%s: %s\n", dev->name, __func__);
410 
411  packet_number = SMC_GET_RXFIFO(lp);
412  if (unlikely(packet_number & RXFIFO_REMPTY)) {
413  PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
414  return;
415  }
416 
417  /* read from start of packet */
418  SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);
419 
420  /* First two words are status and packet length */
421  SMC_GET_PKT_HDR(lp, status, packet_len);
422  packet_len &= 0x07ff; /* mask off top bits */
423  DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
424  dev->name, packet_number, status,
425  packet_len, packet_len);
426 
427  back:
428  if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
429  if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
430  /* accept VLAN packets */
431  status &= ~RS_TOOLONG;
432  goto back;
433  }
434  if (packet_len < 6) {
435  /* bloody hardware */
436  printk(KERN_ERR "%s: fubar (rxlen %u status %x)\n",
437  dev->name, packet_len, status);
438  status |= RS_TOOSHORT;
439  }
440  SMC_WAIT_MMU_BUSY(lp);
441  SMC_SET_MMU_CMD(lp, MC_RELEASE);
442  dev->stats.rx_errors++;
443  if (status & RS_ALGNERR)
444  dev->stats.rx_frame_errors++;
445  if (status & (RS_TOOSHORT | RS_TOOLONG))
446  dev->stats.rx_length_errors++;
447  if (status & RS_BADCRC)
448  dev->stats.rx_crc_errors++;
449  } else {
450  struct sk_buff *skb;
451  unsigned char *data;
452  unsigned int data_len;
453 
454  /* set multicast stats */
455  if (status & RS_MULTICAST)
456  dev->stats.multicast++;
457 
458  /*
459  * Actual payload is packet_len - 6 (or 5 if odd byte).
460  * We want skb_reserve(2) and the final ctrl word
461  * (2 bytes, possibly containing the payload odd byte).
462  * Furthermore, we add 2 bytes to allow rounding up to
463  * multiple of 4 bytes on 32 bit buses.
464  * Hence packet_len - 6 + 2 + 2 + 2.
465  */
466  skb = netdev_alloc_skb(dev, packet_len);
467  if (unlikely(skb == NULL)) {
468  printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
469  dev->name);
470  SMC_WAIT_MMU_BUSY(lp);
471  SMC_SET_MMU_CMD(lp, MC_RELEASE);
472  dev->stats.rx_dropped++;
473  return;
474  }
475 
476  /* Align IP header to 32 bits */
477  skb_reserve(skb, 2);
478 
479  /* BUG: the LAN91C111 rev A never sets this bit. Force it. */
480  if (lp->version == 0x90)
481  status |= RS_ODDFRAME;
482 
483  /*
484  * If odd length: packet_len - 5,
485  * otherwise packet_len - 6.
486  * With the trailing ctrl byte it's packet_len - 4.
487  */
488  data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
489  data = skb_put(skb, data_len);
490  SMC_PULL_DATA(lp, data, packet_len - 4);
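 /*
 * Worked example for an even-length frame: with packet_len = 70 the
 * payload is 64 bytes, so data_len = 64 and SMC_PULL_DATA copies
 * 66 bytes (payload plus the 2-byte control word) into the 70-byte
 * skb allocated above, after the 2-byte reserve.
 */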
491 
492  SMC_WAIT_MMU_BUSY(lp);
493  SMC_SET_MMU_CMD(lp, MC_RELEASE);
494 
495  PRINT_PKT(data, packet_len - 4);
496 
497  skb->protocol = eth_type_trans(skb, dev);
498  netif_rx(skb);
499  dev->stats.rx_packets++;
500  dev->stats.rx_bytes += data_len;
501  }
502 }
503 
504 #ifdef CONFIG_SMP
505 /*
506  * On SMP we have the following problem:
507  *
508  * A = smc_hardware_send_pkt()
509  * B = smc_hard_start_xmit()
510  * C = smc_interrupt()
511  *
512  * A and B can never be executed simultaneously. However, at least on UP,
513  * it is possible (and even desirable) for C to interrupt execution of
514  * A or B in order to have better RX reliability and avoid overruns.
515  * C, just like A and B, must have exclusive access to the chip and
516  * each of them must lock against any other concurrent access.
517  * Unfortunately it is not possible to have C suspend execution of A or
518  * B taking place on another CPU. On UP this is not an issue since A and B
519  * are run from softirq context and C from hard IRQ context, and there is
520  * no other CPU where concurrent access can happen.
521  * If ever there is a way to force at least B and C to always be executed
522  * on the same CPU then we could use read/write locks to protect against
523  * any other concurrent access and C would always interrupt B. But life
524  * isn't that easy in an SMP world...
525  */
526 #define smc_special_trylock(lock, flags) \
527 ({ \
528  int __ret; \
529  local_irq_save(flags); \
530  __ret = spin_trylock(lock); \
531  if (!__ret) \
532  local_irq_restore(flags); \
533  __ret; \
534 })
535 #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags)
536 #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags)
537 #else
538 #define smc_special_trylock(lock, flags) (flags == flags)
539 #define smc_special_lock(lock, flags) do { flags = 0; } while (0)
540 #define smc_special_unlock(lock, flags) do { flags = 0; } while (0)
541 #endif
542 
543 /*
544  * This is called to actually send a packet to the chip.
545  */
546 static void smc_hardware_send_pkt(unsigned long data)
547 {
548  struct net_device *dev = (struct net_device *)data;
549  struct smc_local *lp = netdev_priv(dev);
550  void __iomem *ioaddr = lp->base;
551  struct sk_buff *skb;
552  unsigned int packet_no, len;
553  unsigned char *buf;
554  unsigned long flags;
555 
556  DBG(3, "%s: %s\n", dev->name, __func__);
557 
558  if (!smc_special_trylock(&lp->lock, flags)) {
559  netif_stop_queue(dev);
560  tasklet_schedule(&lp->tx_task);
561  return;
562  }
563 
564  skb = lp->pending_tx_skb;
565  if (unlikely(!skb)) {
566  smc_special_unlock(&lp->lock, flags);
567  return;
568  }
569  lp->pending_tx_skb = NULL;
570 
571  packet_no = SMC_GET_AR(lp);
572  if (unlikely(packet_no & AR_FAILED)) {
573  printk("%s: Memory allocation failed.\n", dev->name);
574  dev->stats.tx_errors++;
575  dev->stats.tx_fifo_errors++;
576  smc_special_unlock(&lp->lock, flags);
577  goto done;
578  }
579 
580  /* point to the beginning of the packet */
581  SMC_SET_PN(lp, packet_no);
582  SMC_SET_PTR(lp, PTR_AUTOINC);
583 
584  buf = skb->data;
585  len = skb->len;
586  DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n",
587  dev->name, packet_no, len, len, buf);
588  PRINT_PKT(buf, len);
589 
590  /*
591  * Send the packet length (+6 for status words, length, and ctl).
592  * The card will pad to 64 bytes with zeroes if packet is too small.
593  */
594  SMC_PUT_PKT_HDR(lp, 0, len + 6);
595 
596  /* send the actual data */
597  SMC_PUSH_DATA(lp, buf, len & ~1);
598 
599  /* Send final ctl word with the last byte if there is one */
600  SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
601 
602  /*
603  * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
604  * have the effect of having at most one packet queued for TX
605  * in the chip's memory at all time.
606  *
607  * If THROTTLE_TX_PKTS is not set then the queue is stopped only
608  * when memory allocation (MC_ALLOC) does not succeed right away.
609  */
610  if (THROTTLE_TX_PKTS)
611  netif_stop_queue(dev);
612 
613  /* queue the packet for TX */
614  SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
615  smc_special_unlock(&lp->lock, flags);
616 
617  dev->trans_start = jiffies;
618  dev->stats.tx_packets++;
619  dev->stats.tx_bytes += len;
620 
621  SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);
622 
623 done: if (!THROTTLE_TX_PKTS)
624  netif_wake_queue(dev);
625 
626  dev_kfree_skb(skb);
627 }
628 
629 /*
630  * Since I am not sure if I will have enough room in the chip's ram
631  * to store the packet, I call this routine which either sends it
632  * now, or sets the card to generate an interrupt when ready
633  * for the packet.
634  */
635 static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
636 {
637  struct smc_local *lp = netdev_priv(dev);
638  void __iomem *ioaddr = lp->base;
639  unsigned int numPages, poll_count, status;
640  unsigned long flags;
641 
642  DBG(3, "%s: %s\n", dev->name, __func__);
643 
644  BUG_ON(lp->pending_tx_skb != NULL);
645 
646  /*
647  * The MMU wants the number of pages to be the number of 256 bytes
648  * 'pages', minus 1 (since a packet can't ever have 0 pages :))
649  *
650  * The 91C111 ignores the size bits, but earlier models don't.
651  *
652  * Pkt size for allocating is data length +6 (for additional status
653  * words, length and ctl)
654  *
655  * If odd size then last byte is included in ctl word.
656  */
657  numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
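 /*
 * Example: a full-size 1514-byte frame gives
 * ((1514 & ~1) + 5) >> 8 = 1519 >> 8 = 5, so MC_ALLOC below requests
 * 5 + 1 = 6 pages of 256 bytes (1536 bytes), enough for the
 * 1514 + 6 bytes of data, header words and control word.
 */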
658  if (unlikely(numPages > 7)) {
659  printk("%s: Far too big packet error.\n", dev->name);
660  dev->stats.tx_errors++;
661  dev->stats.tx_dropped++;
662  dev_kfree_skb(skb);
663  return NETDEV_TX_OK;
664  }
665 
666  smc_special_lock(&lp->lock, flags);
667 
668  /* now, try to allocate the memory */
669  SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
670 
671  /*
672  * Poll the chip for a short amount of time in case the
673  * allocation succeeds quickly.
674  */
675  poll_count = MEMORY_WAIT_TIME;
676  do {
677  status = SMC_GET_INT(lp);
678  if (status & IM_ALLOC_INT) {
679  SMC_ACK_INT(lp, IM_ALLOC_INT);
680  break;
681  }
682  } while (--poll_count);
683 
684  smc_special_unlock(&lp->lock, flags);
685 
686  lp->pending_tx_skb = skb;
687  if (!poll_count) {
688  /* oh well, wait until the chip finds memory later */
689  netif_stop_queue(dev);
690  DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
691  SMC_ENABLE_INT(lp, IM_ALLOC_INT);
692  } else {
693  /*
694  * Allocation succeeded: push packet to the chip's own memory
695  * immediately.
696  */
697  smc_hardware_send_pkt((unsigned long)dev);
698  }
699 
700  return NETDEV_TX_OK;
701 }
702 
703 /*
704  * This handles a TX interrupt, which is only called when:
705  * - a TX error occurred, or
706  * - CTL_AUTO_RELEASE is not set and TX of a packet completed.
707  */
708 static void smc_tx(struct net_device *dev)
709 {
710  struct smc_local *lp = netdev_priv(dev);
711  void __iomem *ioaddr = lp->base;
712  unsigned int saved_packet, packet_no, tx_status, pkt_len;
713 
714  DBG(3, "%s: %s\n", dev->name, __func__);
715 
716  /* If the TX FIFO is empty then nothing to do */
717  packet_no = SMC_GET_TXFIFO(lp);
718  if (unlikely(packet_no & TXFIFO_TEMPTY)) {
719  PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
720  return;
721  }
722 
723  /* select packet to read from */
724  saved_packet = SMC_GET_PN(lp);
725  SMC_SET_PN(lp, packet_no);
726 
727  /* read the first word (status word) from this packet */
728  SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
729  SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
730  DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
731  dev->name, tx_status, packet_no);
732 
733  if (!(tx_status & ES_TX_SUC))
734  dev->stats.tx_errors++;
735 
736  if (tx_status & ES_LOSTCARR)
737  dev->stats.tx_carrier_errors++;
738 
739  if (tx_status & (ES_LATCOL | ES_16COL)) {
740  PRINTK("%s: %s occurred on last xmit\n", dev->name,
741  (tx_status & ES_LATCOL) ?
742  "late collision" : "too many collisions");
743  dev->stats.tx_window_errors++;
744  if (!(dev->stats.tx_window_errors & 63) && net_ratelimit()) {
745  printk(KERN_INFO "%s: unexpectedly large number of "
746  "bad collisions. Please check duplex "
747  "setting.\n", dev->name);
748  }
749  }
750 
751  /* kill the packet */
752  SMC_WAIT_MMU_BUSY(lp);
753  SMC_SET_MMU_CMD(lp, MC_FREEPKT);
754 
755  /* Don't restore Packet Number Reg until busy bit is cleared */
756  SMC_WAIT_MMU_BUSY(lp);
757  SMC_SET_PN(lp, saved_packet);
758 
759  /* re-enable transmit */
760  SMC_SELECT_BANK(lp, 0);
761  SMC_SET_TCR(lp, lp->tcr_cur_mode);
762  SMC_SELECT_BANK(lp, 2);
763 }
764 
765 
766 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
767 
768 static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
769 {
770  struct smc_local *lp = netdev_priv(dev);
771  void __iomem *ioaddr = lp->base;
772  unsigned int mii_reg, mask;
773 
774  mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
775  mii_reg |= MII_MDOE;
776 
777  for (mask = 1 << (bits - 1); mask; mask >>= 1) {
778  if (val & mask)
779  mii_reg |= MII_MDO;
780  else
781  mii_reg &= ~MII_MDO;
782 
783  SMC_SET_MII(lp, mii_reg);
784  udelay(MII_DELAY);
785  SMC_SET_MII(lp, mii_reg | MII_MCLK);
786  udelay(MII_DELAY);
787  }
788 }
789 
790 static unsigned int smc_mii_in(struct net_device *dev, int bits)
791 {
792  struct smc_local *lp = netdev_priv(dev);
793  void __iomem *ioaddr = lp->base;
794  unsigned int mii_reg, mask, val;
795 
796  mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
797  SMC_SET_MII(lp, mii_reg);
798 
799  for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
800  if (SMC_GET_MII(lp) & MII_MDI)
801  val |= mask;
802 
803  SMC_SET_MII(lp, mii_reg);
804  udelay(MII_DELAY);
805  SMC_SET_MII(lp, mii_reg | MII_MCLK);
806  udelay(MII_DELAY);
807  }
808 
809  return val;
810 }
811 
812 /*
813  * Reads a register from the MII Management serial interface
814  */
815 static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
816 {
817  struct smc_local *lp = netdev_priv(dev);
818  void __iomem *ioaddr = lp->base;
819  unsigned int phydata;
820 
821  SMC_SELECT_BANK(lp, 3);
822 
823  /* Idle - 32 ones */
824  smc_mii_out(dev, 0xffffffff, 32);
825 
826  /* Start code (01) + read (10) + phyaddr + phyreg */
827  smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14);
828 
829  /* Turnaround (2bits) + phydata */
830  phydata = smc_mii_in(dev, 18);
831 
832  /* Return to idle state */
833  SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
834 
835  DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
836  __func__, phyaddr, phyreg, phydata);
837 
838  SMC_SELECT_BANK(lp, 2);
839  return phydata;
840 }
841 
842 /*
843  * Writes a register to the MII Management serial interface
844  */
845 static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
846  int phydata)
847 {
848  struct smc_local *lp = netdev_priv(dev);
849  void __iomem *ioaddr = lp->base;
850 
851  SMC_SELECT_BANK(lp, 3);
852 
853  /* Idle - 32 ones */
854  smc_mii_out(dev, 0xffffffff, 32);
855 
856  /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */
857  smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);
858 
859  /* Return to idle state */
860  SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
861 
862  DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
863  __func__, phyaddr, phyreg, phydata);
864 
865  SMC_SELECT_BANK(lp, 2);
866 }
867 
868 /*
869  * Finds and reports the PHY address
870  */
871 static void smc_phy_detect(struct net_device *dev)
872 {
873  struct smc_local *lp = netdev_priv(dev);
874  int phyaddr;
875 
876  DBG(2, "%s: %s\n", dev->name, __func__);
877 
878  lp->phy_type = 0;
879 
880  /*
881  * Scan all 32 PHY addresses if necessary, starting at
882  * PHY#1 to PHY#31, and then PHY#0 last.
883  */
884  for (phyaddr = 1; phyaddr < 33; ++phyaddr) {
885  unsigned int id1, id2;
886 
887  /* Read the PHY identifiers */
888  id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1);
889  id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2);
890 
891  DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n",
892  dev->name, id1, id2);
893 
894  /* Make sure it is a valid identifier */
895  if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 &&
896  id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) {
897  /* Save the PHY's address */
898  lp->mii.phy_id = phyaddr & 31;
899  lp->phy_type = id1 << 16 | id2;
900  break;
901  }
902  }
903 }
904 
905 /*
906  * Sets the PHY to a configuration as determined by the user
907  */
908 static int smc_phy_fixed(struct net_device *dev)
909 {
910  struct smc_local *lp = netdev_priv(dev);
911  void __iomem *ioaddr = lp->base;
912  int phyaddr = lp->mii.phy_id;
913  int bmcr, cfg1;
914 
915  DBG(3, "%s: %s\n", dev->name, __func__);
916 
917  /* Enter Link Disable state */
918  cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
919  cfg1 |= PHY_CFG1_LNKDIS;
920  smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1);
921 
922  /*
923  * Set our fixed capabilities
924  * Disable auto-negotiation
925  */
926  bmcr = 0;
927 
928  if (lp->ctl_rfduplx)
929  bmcr |= BMCR_FULLDPLX;
930 
931  if (lp->ctl_rspeed == 100)
932  bmcr |= BMCR_SPEED100;
933 
934  /* Write our capabilities to the phy control register */
935  smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);
936 
937  /* Re-Configure the Receive/Phy Control register */
938  SMC_SELECT_BANK(lp, 0);
939  SMC_SET_RPC(lp, lp->rpc_cur_mode);
940  SMC_SELECT_BANK(lp, 2);
941 
942  return 1;
943 }
944 
958 static int smc_phy_reset(struct net_device *dev, int phy)
959 {
960  struct smc_local *lp = netdev_priv(dev);
961  unsigned int bmcr;
962  int timeout;
963 
964  smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET);
965 
966  for (timeout = 2; timeout; timeout--) {
967  spin_unlock_irq(&lp->lock);
968  msleep(50);
969  spin_lock_irq(&lp->lock);
970 
971  bmcr = smc_phy_read(dev, phy, MII_BMCR);
972  if (!(bmcr & BMCR_RESET))
973  break;
974  }
975 
976  return bmcr & BMCR_RESET;
977 }
978 
985 static void smc_phy_powerdown(struct net_device *dev)
986 {
987  struct smc_local *lp = netdev_priv(dev);
988  unsigned int bmcr;
989  int phy = lp->mii.phy_id;
990 
991  if (lp->phy_type == 0)
992  return;
993 
994  /* We need to ensure that no calls to smc_phy_configure are
995  pending.
996  */
997  cancel_work_sync(&lp->phy_configure);
998 
999  bmcr = smc_phy_read(dev, phy, MII_BMCR);
1000  smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
1001 }
1002 
1011 static void smc_phy_check_media(struct net_device *dev, int init)
1012 {
1013  struct smc_local *lp = netdev_priv(dev);
1014  void __iomem *ioaddr = lp->base;
1015 
1016  if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
1017  /* duplex state has changed */
1018  if (lp->mii.full_duplex) {
1019  lp->tcr_cur_mode |= TCR_SWFDUP;
1020  } else {
1021  lp->tcr_cur_mode &= ~TCR_SWFDUP;
1022  }
1023 
1024  SMC_SELECT_BANK(lp, 0);
1025  SMC_SET_TCR(lp, lp->tcr_cur_mode);
1026  }
1027 }
1028 
1029 /*
1030  * Configures the specified PHY through the MII management interface
1031  * using Autonegotiation.
1032  * Calls smc_phy_fixed() if the user has requested a certain config.
1033  * If RPC ANEG bit is set, the media selection is dependent purely on
1034  * the selection by the MII (either in the MII BMCR reg or the result
1035  * of autonegotiation.) If the RPC ANEG bit is cleared, the selection
1036  * is controlled by the RPC SPEED and RPC DPLX bits.
1037  */
1038 static void smc_phy_configure(struct work_struct *work)
1039 {
1040  struct smc_local *lp =
1041  container_of(work, struct smc_local, phy_configure);
1042  struct net_device *dev = lp->dev;
1043  void __iomem *ioaddr = lp->base;
1044  int phyaddr = lp->mii.phy_id;
1045  int my_phy_caps; /* My PHY capabilities */
1046  int my_ad_caps; /* My Advertised capabilities */
1047  int status;
1048 
1049  DBG(3, "%s:smc_program_phy()\n", dev->name);
1050 
1051  spin_lock_irq(&lp->lock);
1052 
1053  /*
1054  * We should not be called if phy_type is zero.
1055  */
1056  if (lp->phy_type == 0)
1057  goto smc_phy_configure_exit;
1058 
1059  if (smc_phy_reset(dev, phyaddr)) {
1060  printk("%s: PHY reset timed out\n", dev->name);
1061  goto smc_phy_configure_exit;
1062  }
1063 
1064  /*
1065  * Enable PHY Interrupts (for register 18)
1066  * Interrupts listed here are disabled
1067  */
1068  smc_phy_write(dev, phyaddr, PHY_MASK_REG,
1069  PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD |
1070  PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB |
1071  PHY_INT_SPDDET | PHY_INT_DPLXDET);
1072 
1073  /* Configure the Receive/Phy Control register */
1074  SMC_SELECT_BANK(lp, 0);
1075  SMC_SET_RPC(lp, lp->rpc_cur_mode);
1076 
1077  /* If the user requested no auto neg, then go set his request */
1078  if (lp->mii.force_media) {
1079  smc_phy_fixed(dev);
1080  goto smc_phy_configure_exit;
1081  }
1082 
1083  /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
1084  my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR);
1085 
1086  if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
1087  printk(KERN_INFO "Auto negotiation NOT supported\n");
1088  smc_phy_fixed(dev);
1089  goto smc_phy_configure_exit;
1090  }
1091 
1092  my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */
1093 
1094  if (my_phy_caps & BMSR_100BASE4)
1095  my_ad_caps |= ADVERTISE_100BASE4;
1096  if (my_phy_caps & BMSR_100FULL)
1097  my_ad_caps |= ADVERTISE_100FULL;
1098  if (my_phy_caps & BMSR_100HALF)
1099  my_ad_caps |= ADVERTISE_100HALF;
1100  if (my_phy_caps & BMSR_10FULL)
1101  my_ad_caps |= ADVERTISE_10FULL;
1102  if (my_phy_caps & BMSR_10HALF)
1103  my_ad_caps |= ADVERTISE_10HALF;
1104 
1105  /* Disable capabilities not selected by our user */
1106  if (lp->ctl_rspeed != 100)
1107  my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
1108 
1109  if (!lp->ctl_rfduplx)
1110  my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
1111 
1112  /* Update our Auto-Neg Advertisement Register */
1113  smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps);
1114  lp->mii.advertising = my_ad_caps;
1115 
1116  /*
1117  * Read the register back. Without this, it appears that when
1118  * auto-negotiation is restarted, sometimes it isn't ready and
1119  * the link does not come up.
1120  */
1121  status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
1122 
1123  DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps);
1124  DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps);
1125 
1126  /* Restart auto-negotiation process in order to advertise my caps */
1127  smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
1128 
1129  smc_phy_check_media(dev, 1);
1130 
1131 smc_phy_configure_exit:
1132  SMC_SELECT_BANK(lp, 2);
1133  spin_unlock_irq(&lp->lock);
1134 }
1135 
1136 /*
1137  * smc_phy_interrupt
1138  *
1139  * Purpose: Handle interrupts relating to PHY register 18. This is
1140  * called from the "hard" interrupt handler under our private spinlock.
1141  */
1142 static void smc_phy_interrupt(struct net_device *dev)
1143 {
1144  struct smc_local *lp = netdev_priv(dev);
1145  int phyaddr = lp->mii.phy_id;
1146  int phy18;
1147 
1148  DBG(2, "%s: %s\n", dev->name, __func__);
1149 
1150  if (lp->phy_type == 0)
1151  return;
1152 
1153  for(;;) {
1154  smc_phy_check_media(dev, 0);
1155 
1156  /* Read PHY Register 18, Status Output */
1157  phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG);
1158  if ((phy18 & PHY_INT_INT) == 0)
1159  break;
1160  }
1161 }
1162 
1163 /*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
1164 
1165 static void smc_10bt_check_media(struct net_device *dev, int init)
1166 {
1167  struct smc_local *lp = netdev_priv(dev);
1168  void __iomem *ioaddr = lp->base;
1169  unsigned int old_carrier, new_carrier;
1170 
1171  old_carrier = netif_carrier_ok(dev) ? 1 : 0;
1172 
1173  SMC_SELECT_BANK(lp, 0);
1174  new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
1175  SMC_SELECT_BANK(lp, 2);
1176 
1177  if (init || (old_carrier != new_carrier)) {
1178  if (!new_carrier) {
1179  netif_carrier_off(dev);
1180  } else {
1181  netif_carrier_on(dev);
1182  }
1183  if (netif_msg_link(lp))
1184  printk(KERN_INFO "%s: link %s\n", dev->name,
1185  new_carrier ? "up" : "down");
1186  }
1187 }
1188 
1189 static void smc_eph_interrupt(struct net_device *dev)
1190 {
1191  struct smc_local *lp = netdev_priv(dev);
1192  void __iomem *ioaddr = lp->base;
1193  unsigned int ctl;
1194 
1195  smc_10bt_check_media(dev, 0);
1196 
1197  SMC_SELECT_BANK(lp, 1);
1198  ctl = SMC_GET_CTL(lp);
1199  SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
1200  SMC_SET_CTL(lp, ctl);
1201  SMC_SELECT_BANK(lp, 2);
1202 }
1203 
1204 /*
1205  * This is the main routine of the driver, to handle the device when
1206  * it needs some attention.
1207  */
1208 static irqreturn_t smc_interrupt(int irq, void *dev_id)
1209 {
1210  struct net_device *dev = dev_id;
1211  struct smc_local *lp = netdev_priv(dev);
1212  void __iomem *ioaddr = lp->base;
1213  int status, mask, timeout, card_stats;
1214  int saved_pointer;
1215 
1216  DBG(3, "%s: %s\n", dev->name, __func__);
1217 
1218  spin_lock(&lp->lock);
1219 
1220  /* A preamble may be used when there is a potential race
1221  * between the interruptible transmit functions and this
1222  * ISR. */
1223  SMC_INTERRUPT_PREAMBLE;
1224 
1225  saved_pointer = SMC_GET_PTR(lp);
1226  mask = SMC_GET_INT_MASK(lp);
1227  SMC_SET_INT_MASK(lp, 0);
1228 
1229  /* set a timeout value, so I don't stay here forever */
1230  timeout = MAX_IRQ_LOOPS;
1231 
1232  do {
1233  status = SMC_GET_INT(lp);
1234 
1235  DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
1236  dev->name, status, mask,
1237  ({ int meminfo; SMC_SELECT_BANK(lp, 0);
1238  meminfo = SMC_GET_MIR(lp);
1239  SMC_SELECT_BANK(lp, 2); meminfo; }),
1240  SMC_GET_FIFO(lp));
1241 
1242  status &= mask;
1243  if (!status)
1244  break;
1245 
1246  if (status & IM_TX_INT) {
1247  /* do this before RX as it will free memory quickly */
1248  DBG(3, "%s: TX int\n", dev->name);
1249  smc_tx(dev);
1250  SMC_ACK_INT(lp, IM_TX_INT);
1251  if (THROTTLE_TX_PKTS)
1252  netif_wake_queue(dev);
1253  } else if (status & IM_RCV_INT) {
1254  DBG(3, "%s: RX irq\n", dev->name);
1255  smc_rcv(dev);
1256  } else if (status & IM_ALLOC_INT) {
1257  DBG(3, "%s: Allocation irq\n", dev->name);
1258  tasklet_hi_schedule(&lp->tx_task);
1259  mask &= ~IM_ALLOC_INT;
1260  } else if (status & IM_TX_EMPTY_INT) {
1261  DBG(3, "%s: TX empty\n", dev->name);
1262  mask &= ~IM_TX_EMPTY_INT;
1263 
1264  /* update stats */
1265  SMC_SELECT_BANK(lp, 0);
1266  card_stats = SMC_GET_COUNTER(lp);
1267  SMC_SELECT_BANK(lp, 2);
1268 
1269  /* single collisions */
1270  dev->stats.collisions += card_stats & 0xF;
1271  card_stats >>= 4;
1272 
1273  /* multiple collisions */
1274  dev->stats.collisions += card_stats & 0xF;
1275  } else if (status & IM_RX_OVRN_INT) {
1276  DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
1277  ({ int eph_st; SMC_SELECT_BANK(lp, 0);
1278  eph_st = SMC_GET_EPH_STATUS(lp);
1279  SMC_SELECT_BANK(lp, 2); eph_st; }));
1280  SMC_ACK_INT(lp, IM_RX_OVRN_INT);
1281  dev->stats.rx_errors++;
1282  dev->stats.rx_fifo_errors++;
1283  } else if (status & IM_EPH_INT) {
1284  smc_eph_interrupt(dev);
1285  } else if (status & IM_MDINT) {
1286  SMC_ACK_INT(lp, IM_MDINT);
1287  smc_phy_interrupt(dev);
1288  } else if (status & IM_ERCV_INT) {
1289  SMC_ACK_INT(lp, IM_ERCV_INT);
1290  PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
1291  }
1292  } while (--timeout);
1293 
1294  /* restore register states */
1295  SMC_SET_PTR(lp, saved_pointer);
1296  SMC_SET_INT_MASK(lp, mask);
1297  spin_unlock(&lp->lock);
1298 
1299 #ifndef CONFIG_NET_POLL_CONTROLLER
1300  if (timeout == MAX_IRQ_LOOPS)
1301  PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
1302  dev->name, mask);
1303 #endif
1304  DBG(3, "%s: Interrupt done (%d loops)\n",
1305  dev->name, MAX_IRQ_LOOPS - timeout);
1306 
1307  /*
1308  * We return IRQ_HANDLED unconditionally here even if there was
1309  * nothing to do. There is a possibility that a packet might
1310  * get enqueued into the chip right after TX_EMPTY_INT is raised
1311  * but just before the CPU acknowledges the IRQ.
1312  * Better to take an unneeded IRQ on some occasions than to complicate
1313  * the code for all cases.
1314  */
1315  return IRQ_HANDLED;
1316 }
1317 
1318 #ifdef CONFIG_NET_POLL_CONTROLLER
1319 /*
1320  * Polling receive - used by netconsole and other diagnostic tools
1321  * to allow network i/o with interrupts disabled.
1322  */
1323 static void smc_poll_controller(struct net_device *dev)
1324 {
1325  disable_irq(dev->irq);
1326  smc_interrupt(dev->irq, dev);
1327  enable_irq(dev->irq);
1328 }
1329 #endif
1330 
1331 /* Our watchdog timed out. Called by the networking layer */
1332 static void smc_timeout(struct net_device *dev)
1333 {
1334  struct smc_local *lp = netdev_priv(dev);
1335  void __iomem *ioaddr = lp->base;
1336  int status, mask, eph_st, meminfo, fifo;
1337 
1338  DBG(2, "%s: %s\n", dev->name, __func__);
1339 
1340  spin_lock_irq(&lp->lock);
1341  status = SMC_GET_INT(lp);
1342  mask = SMC_GET_INT_MASK(lp);
1343  fifo = SMC_GET_FIFO(lp);
1344  SMC_SELECT_BANK(lp, 0);
1345  eph_st = SMC_GET_EPH_STATUS(lp);
1346  meminfo = SMC_GET_MIR(lp);
1347  SMC_SELECT_BANK(lp, 2);
1348  spin_unlock_irq(&lp->lock);
1349  PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
1350  "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
1351  dev->name, status, mask, meminfo, fifo, eph_st );
1352 
1353  smc_reset(dev);
1354  smc_enable(dev);
1355 
1356  /*
1357  * Reconfiguring the PHY doesn't seem like a bad idea here, but
1358  * smc_phy_configure() calls msleep() which calls schedule_timeout()
1359  * which calls schedule(). Hence we use a work queue.
1360  */
1361  if (lp->phy_type != 0)
1362  schedule_work(&lp->phy_configure);
1363 
1364  /* We can accept TX packets again */
1365  dev->trans_start = jiffies; /* prevent tx timeout */
1366  netif_wake_queue(dev);
1367 }
1368 
1369 /*
1370  * This routine will, depending on the values passed to it,
1371  * either make it accept multicast packets, go into
1372  * promiscuous mode (for TCPDUMP and cousins) or accept
1373  * a select set of multicast packets
1374  */
1375 static void smc_set_multicast_list(struct net_device *dev)
1376 {
1377  struct smc_local *lp = netdev_priv(dev);
1378  void __iomem *ioaddr = lp->base;
1379  unsigned char multicast_table[8];
1380  int update_multicast = 0;
1381 
1382  DBG(2, "%s: %s\n", dev->name, __func__);
1383 
1384  if (dev->flags & IFF_PROMISC) {
1385  DBG(2, "%s: RCR_PRMS\n", dev->name);
1386  lp->rcr_cur_mode |= RCR_PRMS;
1387  }
1388 
1389 /* BUG? I never disable promiscuous mode if multicasting was turned on.
1390  Now, I turn off promiscuous mode, but I don't do anything to multicasting
1391  when promiscuous mode is turned on.
1392 */
1393 
1394  /*
1395  * Here, I am setting this to accept all multicast packets.
1396  * I don't need to zero the multicast table, because the flag is
1397  * checked before the table is
1398  */
1399  else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
1400  DBG(2, "%s: RCR_ALMUL\n", dev->name);
1401  lp->rcr_cur_mode |= RCR_ALMUL;
1402  }
1403 
1404  /*
1405  * This sets the internal hardware table to filter out unwanted
1406  * multicast packets before they take up memory.
1407  *
1408  * The SMC chip uses a hash table where the high 6 bits of the CRC of
1409  * address are the offset into the table. If that bit is 1, then the
1410  * multicast packet is accepted. Otherwise, it's dropped silently.
1411  *
1412  * To use the 6 bits as an offset into the table, the high 3 bits are
1413  * the number of the 8 bit register, while the low 3 bits are the bit
1414  * within that register.
1415  */
1416  else if (!netdev_mc_empty(dev)) {
1417  struct netdev_hw_addr *ha;
1418 
1419  /* table for flipping the order of 3 bits */
1420  static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
1421 
1422  /* start with a table of all zeros: reject all */
1423  memset(multicast_table, 0, sizeof(multicast_table));
1424 
1425  netdev_for_each_mc_addr(ha, dev) {
1426  int position;
1427 
1428  /* only use the low order bits */
1429  position = crc32_le(~0, ha->addr, 6) & 0x3f;
1430 
1431  /* do some messy swapping to put the bit in the right spot */
1432  multicast_table[invert3[position&7]] |=
1433  (1<<invert3[(position>>3)&7]);
1434  }
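 /*
 * Example: if the CRC yields position = 43 (0b101011), then
 * invert3[43 & 7] = invert3[3] = 6 selects table byte 6, and
 * 1 << invert3[(43 >> 3) & 7] = 1 << invert3[5] = 1 << 5 = 0x20
 * selects bit 5, i.e. multicast_table[6] |= 0x20.
 */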
1435 
1436  /* be sure I get rid of flags I might have set */
1437  lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
1438 
1439  /* now, the table can be loaded into the chipset */
1440  update_multicast = 1;
1441  } else {
1442  DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
1443  lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
1444 
1445  /*
1446  * since I'm disabling all multicast entirely, I need to
1447  * clear the multicast list
1448  */
1449  memset(multicast_table, 0, sizeof(multicast_table));
1450  update_multicast = 1;
1451  }
1452 
1453  spin_lock_irq(&lp->lock);
1454  SMC_SELECT_BANK(lp, 0);
1455  SMC_SET_RCR(lp, lp->rcr_cur_mode);
1456  if (update_multicast) {
1457  SMC_SELECT_BANK(lp, 3);
1458  SMC_SET_MCAST(lp, multicast_table);
1459  }
1460  SMC_SELECT_BANK(lp, 2);
1461  spin_unlock_irq(&lp->lock);
1462 }
1463 
1464 
1465 /*
1466  * Open and Initialize the board
1467  *
1468  * Set up everything, reset the card, etc..
1469  */
1470 static int
1471 smc_open(struct net_device *dev)
1472 {
1473  struct smc_local *lp = netdev_priv(dev);
1474 
1475  DBG(2, "%s: %s\n", dev->name, __func__);
1476 
1477  /*
1478  * Check that the address is valid. If it's not, refuse
1479  * to bring the device up. The user must specify an
1480  * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1481  */
1482  if (!is_valid_ether_addr(dev->dev_addr)) {
1483  PRINTK("%s: no valid ethernet hw addr\n", __func__);
1484  return -EINVAL;
1485  }
1486 
1487  /* Setup the default Register Modes */
1488  lp->tcr_cur_mode = TCR_DEFAULT;
1489  lp->rcr_cur_mode = RCR_DEFAULT;
1490  lp->rpc_cur_mode = RPC_DEFAULT |
1491  lp->cfg.leda << RPC_LSXA_SHFT |
1492  lp->cfg.ledb << RPC_LSXB_SHFT;
1493 
1494  /*
1495  * If we are not using a MII interface, we need to
1496  * monitor our own carrier signal to detect faults.
1497  */
1498  if (lp->phy_type == 0)
1499  lp->tcr_cur_mode |= TCR_MON_CSN;
1500 
1501  /* reset the hardware */
1502  smc_reset(dev);
1503  smc_enable(dev);
1504 
1505  /* Configure the PHY, initialize the link state */
1506  if (lp->phy_type != 0)
1507  smc_phy_configure(&lp->phy_configure);
1508  else {
1509  spin_lock_irq(&lp->lock);
1510  smc_10bt_check_media(dev, 1);
1511  spin_unlock_irq(&lp->lock);
1512  }
1513 
1514  netif_start_queue(dev);
1515  return 0;
1516 }
1517 
1518 /*
1519  * smc_close
1520  *
1521  * this makes the board clean up everything that it can
1522  * and not talk to the outside world. Caused by
1523  * an 'ifconfig ethX down'
1524  */
1525 static int smc_close(struct net_device *dev)
1526 {
1527  struct smc_local *lp = netdev_priv(dev);
1528 
1529  DBG(2, "%s: %s\n", dev->name, __func__);
1530 
1531  netif_stop_queue(dev);
1532  netif_carrier_off(dev);
1533 
1534  /* clear everything */
1535  smc_shutdown(dev);
1536  tasklet_kill(&lp->tx_task);
1537  smc_phy_powerdown(dev);
1538  return 0;
1539 }
1540 
1541 /*
1542  * Ethtool support
1543  */
1544 static int
1545 smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1546 {
1547  struct smc_local *lp = netdev_priv(dev);
1548  int ret;
1549 
1550  cmd->maxtxpkt = 1;
1551  cmd->maxrxpkt = 1;
1552 
1553  if (lp->phy_type != 0) {
1554  spin_lock_irq(&lp->lock);
1555  ret = mii_ethtool_gset(&lp->mii, cmd);
1556  spin_unlock_irq(&lp->lock);
1557  } else {
1558  cmd->supported = SUPPORTED_10baseT_Half |
1559  SUPPORTED_10baseT_Full |
1560  SUPPORTED_TP | SUPPORTED_AUI;
1561 
1562  if (lp->ctl_rspeed == 10)
1563  ethtool_cmd_speed_set(cmd, SPEED_10);
1564  else if (lp->ctl_rspeed == 100)
1565  ethtool_cmd_speed_set(cmd, SPEED_100);
1566 
1567  cmd->autoneg = AUTONEG_DISABLE;
1568  cmd->transceiver = XCVR_INTERNAL;
1569  cmd->port = 0;
1570  cmd->duplex = lp->ctl_rfduplx ? DUPLEX_FULL : DUPLEX_HALF;
1571 
1572  ret = 0;
1573  }
1574 
1575  return ret;
1576 }
1577 
1578 static int
1579 smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1580 {
1581  struct smc_local *lp = netdev_priv(dev);
1582  int ret;
1583 
1584  if (lp->phy_type != 0) {
1585  spin_lock_irq(&lp->lock);
1586  ret = mii_ethtool_sset(&lp->mii, cmd);
1587  spin_unlock_irq(&lp->lock);
1588  } else {
1589  if (cmd->autoneg != AUTONEG_DISABLE ||
1590  cmd->speed != SPEED_10 ||
1591  (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
1592  (cmd->port != PORT_TP && cmd->port != PORT_AUI))
1593  return -EINVAL;
1594 
1595 // lp->port = cmd->port;
1596  lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
1597 
1598 // if (netif_running(dev))
1599 // smc_set_port(dev);
1600 
1601  ret = 0;
1602  }
1603 
1604  return ret;
1605 }
1606 
1607 static void
1608 smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1609 {
1610  strncpy(info->driver, CARDNAME, sizeof(info->driver));
1611  strncpy(info->version, version, sizeof(info->version));
1612  strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
1613 }
1614 
1615 static int smc_ethtool_nwayreset(struct net_device *dev)
1616 {
1617  struct smc_local *lp = netdev_priv(dev);
1618  int ret = -EINVAL;
1619 
1620  if (lp->phy_type != 0) {
1621  spin_lock_irq(&lp->lock);
1622  ret = mii_nway_restart(&lp->mii);
1623  spin_unlock_irq(&lp->lock);
1624  }
1625 
1626  return ret;
1627 }
1628 
1629 static u32 smc_ethtool_getmsglevel(struct net_device *dev)
1630 {
1631  struct smc_local *lp = netdev_priv(dev);
1632  return lp->msg_enable;
1633 }
1634 
1635 static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level)
1636 {
1637  struct smc_local *lp = netdev_priv(dev);
1638  lp->msg_enable = level;
1639 }
1640 
1641 static int smc_write_eeprom_word(struct net_device *dev, u16 addr, u16 word)
1642 {
1643  u16 ctl;
1644  struct smc_local *lp = netdev_priv(dev);
1645  void __iomem *ioaddr = lp->base;
1646 
1647  spin_lock_irq(&lp->lock);
1648  /* load word into GP register */
1649  SMC_SELECT_BANK(lp, 1);
1650  SMC_SET_GP(lp, word);
1651  /* set the address to put the data in EEPROM */
1652  SMC_SELECT_BANK(lp, 2);
1653  SMC_SET_PTR(lp, addr);
1654  /* tell it to write */
1655  SMC_SELECT_BANK(lp, 1);
1656  ctl = SMC_GET_CTL(lp);
1657  SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_STORE));
1658  /* wait for it to finish */
1659  do {
1660  udelay(1);
1661  } while (SMC_GET_CTL(lp) & CTL_STORE);
1662  /* clean up */
1663  SMC_SET_CTL(lp, ctl);
1664  SMC_SELECT_BANK(lp, 2);
1665  spin_unlock_irq(&lp->lock);
1666  return 0;
1667 }
1668 
1669 static int smc_read_eeprom_word(struct net_device *dev, u16 addr, u16 *word)
1670 {
1671  u16 ctl;
1672  struct smc_local *lp = netdev_priv(dev);
1673  void __iomem *ioaddr = lp->base;
1674 
1675  spin_lock_irq(&lp->lock);
1676  /* set the EEPROM address to get the data from */
1677  SMC_SELECT_BANK(lp, 2);
1678  SMC_SET_PTR(lp, addr | PTR_READ);
1679  /* tell it to load */
1680  SMC_SELECT_BANK(lp, 1);
1681  SMC_SET_GP(lp, 0xffff); /* init to known */
1682  ctl = SMC_GET_CTL(lp);
1683  SMC_SET_CTL(lp, ctl | (CTL_EEPROM_SELECT | CTL_RELOAD));
1684  /* wait for it to finish */
1685  do {
1686  udelay(1);
1687  } while (SMC_GET_CTL(lp) & CTL_RELOAD);
1688  /* read word from GP register */
1689  *word = SMC_GET_GP(lp);
1690  /* clean up */
1691  SMC_SET_CTL(lp, ctl);
1692  SMC_SELECT_BANK(lp, 2);
1693  spin_unlock_irq(&lp->lock);
1694  return 0;
1695 }
1696 
1697 static int smc_ethtool_geteeprom_len(struct net_device *dev)
1698 {
1699  return 0x23 * 2;
1700 }
1701 
1702 static int smc_ethtool_geteeprom(struct net_device *dev,
1703  struct ethtool_eeprom *eeprom, u8 *data)
1704 {
1705  int i;
1706  int imax;
1707 
1708  DBG(1, "Reading %d bytes at %d(0x%x)\n",
1709  eeprom->len, eeprom->offset, eeprom->offset);
1710  imax = smc_ethtool_geteeprom_len(dev);
1711  for (i = 0; i < eeprom->len; i += 2) {
1712  int ret;
1713  u16 wbuf;
1714  int offset = i + eeprom->offset;
1715  if (offset > imax)
1716  break;
1717  ret = smc_read_eeprom_word(dev, offset >> 1, &wbuf);
1718  if (ret != 0)
1719  return ret;
1720  DBG(2, "Read 0x%x from 0x%x\n", wbuf, offset >> 1);
1721  data[i] = (wbuf >> 8) & 0xff;
1722  data[i+1] = wbuf & 0xff;
1723  }
1724  return 0;
1725 }
1726 
1727 static int smc_ethtool_seteeprom(struct net_device *dev,
1728  struct ethtool_eeprom *eeprom, u8 *data)
1729 {
1730  int i;
1731  int imax;
1732 
1733  DBG(1, "Writing %d bytes to %d(0x%x)\n",
1734  eeprom->len, eeprom->offset, eeprom->offset);
1735  imax = smc_ethtool_geteeprom_len(dev);
1736  for (i = 0; i < eeprom->len; i += 2) {
1737  int ret;
1738  u16 wbuf;
1739  int offset = i + eeprom->offset;
1740  if (offset > imax)
1741  break;
1742  wbuf = (data[i] << 8) | data[i + 1];
1743  DBG(2, "Writing 0x%x to 0x%x\n", wbuf, offset >> 1);
1744  ret = smc_write_eeprom_word(dev, offset >> 1, wbuf);
1745  if (ret != 0)
1746  return ret;
1747  }
1748  return 0;
1749 }
1750 
1751 
1752 static const struct ethtool_ops smc_ethtool_ops = {
1753  .get_settings = smc_ethtool_getsettings,
1754  .set_settings = smc_ethtool_setsettings,
1755  .get_drvinfo = smc_ethtool_getdrvinfo,
1756 
1757  .get_msglevel = smc_ethtool_getmsglevel,
1758  .set_msglevel = smc_ethtool_setmsglevel,
1759  .nway_reset = smc_ethtool_nwayreset,
1760  .get_link = ethtool_op_get_link,
1761  .get_eeprom_len = smc_ethtool_geteeprom_len,
1762  .get_eeprom = smc_ethtool_geteeprom,
1763  .set_eeprom = smc_ethtool_seteeprom,
1764 };
1765 
1766 static const struct net_device_ops smc_netdev_ops = {
1767  .ndo_open = smc_open,
1768  .ndo_stop = smc_close,
1769  .ndo_start_xmit = smc_hard_start_xmit,
1770  .ndo_tx_timeout = smc_timeout,
1771  .ndo_set_rx_mode = smc_set_multicast_list,
1772  .ndo_change_mtu = eth_change_mtu,
1773  .ndo_validate_addr = eth_validate_addr,
1774  .ndo_set_mac_address = eth_mac_addr,
1775 #ifdef CONFIG_NET_POLL_CONTROLLER
1776  .ndo_poll_controller = smc_poll_controller,
1777 #endif
1778 };
1779 
1780 /*
1781  * smc_findirq
1782  *
1783  * This routine has a simple purpose -- make the SMC chip generate an
1784  * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1785  */
1786 /*
1787  * does this still work?
1788  *
1789  * I just deleted auto_irq.c, since it was never built...
1790  * --jgarzik
1791  */
1792 static int __devinit smc_findirq(struct smc_local *lp)
1793 {
1794  void __iomem *ioaddr = lp->base;
1795  int timeout = 20;
1796  unsigned long cookie;
1797 
1798  DBG(2, "%s: %s\n", CARDNAME, __func__);
1799 
1800  cookie = probe_irq_on();
1801 
1802  /*
1803  * What I try to do here is trigger an ALLOC_INT. This is done
1804  * by allocating a small chunk of memory, which will give an interrupt
1805  * when done.
1806  */
1807  /* enable ALLOCation interrupts ONLY */
1808  SMC_SELECT_BANK(lp, 2);
1809  SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
1810 
1811  /*
1812  * Allocate 512 bytes of memory. Note that the chip was just
1813  * reset so all the memory is available
1814  */
1815  SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);
1816 
1817  /*
1818  * Wait until positive that the interrupt has been generated
1819  */
1820  do {
1821  int int_status;
1822  udelay(10);
1823  int_status = SMC_GET_INT(lp);
1824  if (int_status & IM_ALLOC_INT)
1825  break; /* got the interrupt */
1826  } while (--timeout);
1827 
1828  /*
1829  * there is really nothing that I can do here if timeout fails,
1830  * as autoirq_report will return a 0 anyway, which is what I
1831  * want in this case. Plus, the clean up is needed in both
1832  * cases.
1833  */
1834 
1835  /* and disable all interrupts again */
1836  SMC_SET_INT_MASK(lp, 0);
1837 
1838  /* and return what I found */
1839  return probe_irq_off(cookie);
1840 }
1841 
1842 /*
1843  * Function: smc_probe(unsigned long ioaddr)
1844  *
1845  * Purpose:
1846  * Tests to see if a given ioaddr points to an SMC91x chip.
1847  * Returns a 0 on success
1848  *
1849  * Algorithm:
1850  * (1) see if the high byte of BANK_SELECT is 0x33
1851  * (2) compare the ioaddr with the base register's address
1852  * (3) see if I recognize the chip ID in the appropriate register
1853  *
1854  * Here I do typical initialization tasks.
1855  *
1856  * o Initialize the structure if needed
1857  * o print out my vanity message if not done so already
1858  * o print out what type of hardware is detected
1859  * o print out the ethernet address
1860  * o find the IRQ
1861  * o set up my private data
1862  * o configure the dev structure with my subroutines
1863  * o actually GRAB the irq.
1864  * o GRAB the region
1865  */
1866 static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1867  unsigned long irq_flags)
1868 {
1869  struct smc_local *lp = netdev_priv(dev);
1870  static int version_printed = 0;
1871  int retval;
1872  unsigned int val, revision_register;
1873  const char *version_string;
1874 
1875  DBG(2, "%s: %s\n", CARDNAME, __func__);
1876 
1877  /* First, see if the high byte is 0x33 */
1878  val = SMC_CURRENT_BANK(lp);
1879  DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
1880  if ((val & 0xFF00) != 0x3300) {
1881  if ((val & 0xFF) == 0x33) {
1882  printk(KERN_ERR
1883  "%s: Detected possible byte-swapped interface"
1884  " at IOADDR %p\n", CARDNAME, ioaddr);
1885  }
1886  retval = -ENODEV;
1887  goto err_out;
1888  }
1889 
1890  /*
1891  * The above MIGHT indicate a device, but I need to write to
1892  * further test this.
1893  */
1894  SMC_SELECT_BANK(lp, 0);
1895  val = SMC_CURRENT_BANK(lp);
1896  if ((val & 0xFF00) != 0x3300) {
1897  retval = -ENODEV;
1898  goto err_out;
1899  }
1900 
1901  /*
1902  * well, we've already written once, so hopefully another
1903  * time won't hurt. This time, I need to switch the bank
1904  * register to bank 1, so I can access the base address
1905  * register
1906  */
1907  SMC_SELECT_BANK(lp, 1);
1908  val = SMC_GET_BASE(lp);
1909  val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
1910  if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
1911  printk("%s: IOADDR %p doesn't match configuration (%x).\n",
1912  CARDNAME, ioaddr, val);
1913  }
1914 
1915  /*
1916  * check if the revision register is something that I
1917  * recognize. These might need to be added to later,
1918  * as future revisions could be added.
1919  */
1920  SMC_SELECT_BANK(lp, 3);
1921  revision_register = SMC_GET_REV(lp);
1922  DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
1923  version_string = chip_ids[ (revision_register >> 4) & 0xF];
1924  if (!version_string || (revision_register & 0xff00) != 0x3300) {
1925  /* I don't recognize this chip, so... */
1926  printk("%s: IO %p: Unrecognized revision register 0x%04x"
1927  ", Contact author.\n", CARDNAME,
1928  ioaddr, revision_register);
1929 
1930  retval = -ENODEV;
1931  goto err_out;
1932  }
1933 
1934  /* At this point I'll assume that the chip is an SMC91x. */
1935  if (version_printed++ == 0)
1936  printk("%s", version);
1937 
1938  /* fill in some of the fields */
1939  dev->base_addr = (unsigned long)ioaddr;
1940  lp->base = ioaddr;
1941  lp->version = revision_register & 0xff;
1942  spin_lock_init(&lp->lock);
1943 
1944  /* Get the MAC address */
1945  SMC_SELECT_BANK(lp, 1);
1946  SMC_GET_MAC_ADDR(lp, dev->dev_addr);
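 /*
  * The station address comes from the bank 1 Individual Address
  * registers, typically preloaded from a serial EEPROM at reset when
  * one is fitted; a board without one may leave it invalid, which is
  * why is_valid_ether_addr() is checked after registration below.
  */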
1947 
1948  /* now, reset the chip, and put it into a known state */
1949  smc_reset(dev);
1950 
1951  /*
1952  * If dev->irq is 0, then the device has to be banged on to see
1953  * what the IRQ is.
1954  *
1955  * This banging doesn't always detect the IRQ, for unknown reasons.
1956  * A workaround is to reset the chip and try again.
1957  *
1958  * Interestingly, the DOS packet driver *SETS* the IRQ on the card to
1959  * be what is requested on the command line. I don't do that, mostly
1960  * because the card that I have uses a non-standard method of accessing
1961  * the IRQs, and because this _should_ work in most configurations.
1962  *
1963  * Specifying an IRQ is done with the assumption that the user knows
1964  * what (s)he is doing. No checking is done!!!!
1965  */
1966  if (dev->irq < 1) {
1967  int trials;
1968 
1969  trials = 3;
1970  while (trials--) {
1971  dev->irq = smc_findirq(lp);
1972  if (dev->irq)
1973  break;
1974  /* kick the card and try again */
1975  smc_reset(dev);
1976  }
1977  }
1978  if (dev->irq == 0) {
1979  printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
1980  dev->name);
1981  retval = -ENODEV;
1982  goto err_out;
1983  }
1984  dev->irq = irq_canonicalize(dev->irq);
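 /*
  * irq_canonicalize() is an architecture hook: on most platforms it is
  * a no-op, but some remap legacy IRQ numbers (for example the cascaded
  * IRQ 2 on PC-style interrupt controllers) to their canonical value.
  */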
1985 
1986  /* Fill in the fields of the device structure with ethernet values. */
1987  ether_setup(dev);
1988 
1989  dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1990  dev->netdev_ops = &smc_netdev_ops;
1991  dev->ethtool_ops = &smc_ethtool_ops;
1992 
1993  tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
1994  INIT_WORK(&lp->phy_configure, smc_phy_configure);
1995  lp->dev = dev;
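 /*
  * Hook this device into the generic MII library: the 0x1f masks limit
  * PHY and register numbers to 5 bits, and all MDIO accesses go through
  * smc_phy_read/smc_phy_write.
  */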
1996  lp->mii.phy_id_mask = 0x1f;
1997  lp->mii.reg_num_mask = 0x1f;
1998  lp->mii.force_media = 0;
1999  lp->mii.full_duplex = 0;
2000  lp->mii.dev = dev;
2001  lp->mii.mdio_read = smc_phy_read;
2002  lp->mii.mdio_write = smc_phy_write;
2003 
2004  /*
2005  * Locate the phy, if any.
2006  */
2007  if (lp->version >= (CHIP_91100 << 4))
2008  smc_phy_detect(dev);
2009 
2010  /* then shut everything down to save power */
2011  smc_shutdown(dev);
2012  smc_phy_powerdown(dev);
2013 
2014  /* Set default parameters */
2015  lp->msg_enable = NETIF_MSG_LINK;
2016  lp->ctl_rfduplx = 0;
2017  lp->ctl_rspeed = 10;
2018 
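 /*
  * The 91C100 and later parts support 100Mbit and full duplex, so for
  * those chips request 100Mbit full duplex by default.
  */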
2019  if (lp->version >= (CHIP_91100 << 4)) {
2020  lp->ctl_rfduplx = 1;
2021  lp->ctl_rspeed = 100;
2022  }
2023 
2024  /* Grab the IRQ */
2025  retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
2026  if (retval)
2027  goto err_out;
2028 
2029 #ifdef CONFIG_ARCH_PXA
2030 # ifdef SMC_USE_PXA_DMA
2031  lp->cfg.flags |= SMC91X_USE_DMA;
2032 # endif
2033  if (lp->cfg.flags & SMC91X_USE_DMA) {
2034  int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
2035  smc_pxa_dma_irq, NULL);
2036  if (dma >= 0)
2037  dev->dma = dma;
2038  }
2039 #endif
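 /*
  * If pxa_request_dma() fails, dev->dma stays at -1 (set in
  * smc_drv_probe) and the driver simply falls back to PIO transfers.
  */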
2040 
2041  retval = register_netdev(dev);
2042  if (retval == 0) {
2043  /* now, print out the card info, in a short format.. */
2044  printk("%s: %s (rev %d) at %p IRQ %d",
2045  dev->name, version_string, revision_register & 0x0f,
2046  lp->base, dev->irq);
2047 
2048  if (dev->dma != (unsigned char)-1)
2049  printk(" DMA %d", dev->dma);
2050 
2051  printk("%s%s\n",
2052  lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
2053  THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
2054 
2055  if (!is_valid_ether_addr(dev->dev_addr)) {
2056  printk("%s: Invalid ethernet MAC address. Please "
2057  "set using ifconfig\n", dev->name);
2058  } else {
2059  /* Print the Ethernet address */
2060  printk("%s: Ethernet addr: %pM\n",
2061  dev->name, dev->dev_addr);
2062  }
2063 
2064  if (lp->phy_type == 0) {
2065  PRINTK("%s: No PHY found\n", dev->name);
2066  } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) {
2067  PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name);
2068  } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) {
2069  PRINTK("%s: PHY LAN83C180\n", dev->name);
2070  }
2071  }
2072 
2073 err_out:
2074 #ifdef CONFIG_ARCH_PXA
2075  if (retval && dev->dma != (unsigned char)-1)
2076  pxa_free_dma(dev->dma);
2077 #endif
2078  return retval;
2079 }
2080 
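/*
 * smc_enable_device: bring the chip out of reset and enable it through
 * its attribute-space control registers (ECOR/ECSR), when the platform
 * supplies an "smc91x-attrib" memory resource; platforms without one
 * skip this step.
 */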
2081 static int smc_enable_device(struct platform_device *pdev)
2082 {
2083  struct net_device *ndev = platform_get_drvdata(pdev);
2084  struct smc_local *lp = netdev_priv(ndev);
2085  unsigned long flags;
2086  unsigned char ecor, ecsr;
2087  void __iomem *addr;
2088  struct resource * res;
2089 
2090  res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2091  if (!res)
2092  return 0;
2093 
2094  /*
2095  * Map the attribute space. This is overkill, but clean.
2096  */
2097  addr = ioremap(res->start, ATTRIB_SIZE);
2098  if (!addr)
2099  return -ENOMEM;
2100 
2101  /*
2102  * Reset the device. We must disable IRQs around this
2103  * since a reset causes the IRQ line to become active.
2104  */
2105  local_irq_save(flags);
2106  ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET;
2107  writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT));
2108  readb(addr + (ECOR << SMC_IO_SHIFT));
2109 
2110  /*
2111  * Wait 100us for the chip to reset.
2112  */
2113  udelay(100);
2114 
2115  /*
2116  * The device will ignore all writes to the enable bit while
2117  * reset is asserted, even if the reset bit is cleared in the
2118  * same write. Must clear reset first, then enable the device.
2119  */
2120  writeb(ecor, addr + (ECOR << SMC_IO_SHIFT));
2121  writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT));
2122 
2123  /*
2124  * Set the appropriate byte/word mode.
2125  */
2126  ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
2127  if (!SMC_16BIT(lp))
2128  ecsr |= ECSR_IOIS8;
2129  writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
2130  local_irq_restore(flags);
2131 
2132  iounmap(addr);
2133 
2134  /*
2135  * Wait for the chip to wake up. We could poll the control
2136  * register in the main register space, but that isn't mapped
2137  * yet. We know this is going to take 750us.
2138  */
2139  msleep(1);
2140 
2141  return 0;
2142 }
2143 
2144 static int smc_request_attrib(struct platform_device *pdev,
2145  struct net_device *ndev)
2146 {
2147  struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2148  struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2149 
2150  if (!res)
2151  return 0;
2152 
2153  if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
2154  return -EBUSY;
2155 
2156  return 0;
2157 }
2158 
2159 static void smc_release_attrib(struct platform_device *pdev,
2160  struct net_device *ndev)
2161 {
2162  struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
2163  struct smc_local *lp __maybe_unused = netdev_priv(ndev);
2164 
2165  if (res)
2166  release_mem_region(res->start, ATTRIB_SIZE);
2167 }
2168 
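/*
 * Some platforms provide a second, 32-bit wide mapping of the data
 * register as an "smc91x-data32" resource; when SMC_CAN_USE_DATACS is
 * set, map it so the data path can use the wider access.
 */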
2169 static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
2170 {
2171  if (SMC_CAN_USE_DATACS) {
2172  struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2173  struct smc_local *lp = netdev_priv(ndev);
2174 
2175  if (!res)
2176  return;
2177 
2178  if (!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
2179  printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
2180  return;
2181  }
2182 
2183  lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
2184  }
2185 }
2186 
2187 static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
2188 {
2189  if (SMC_CAN_USE_DATACS) {
2190  struct smc_local *lp = netdev_priv(ndev);
2191  struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
2192 
2193  if (lp->datacs)
2194  iounmap(lp->datacs);
2195 
2196  lp->datacs = NULL;
2197 
2198  if (res)
2199  release_mem_region(res->start, SMC_DATA_EXTENT);
2200  }
2201 }
2202 
2203 /*
2204  * smc_drv_probe(struct platform_device *pdev)
2205  * Input parameters:
2206  * pdev, the platform device supplying the register window
2207  * ("smc91x-regs" or the first MEM resource), the IRQ resource,
2208  * and optional smc91x_platdata configuration
2209  *
2210  * Output:
2211  * 0 --> there is a device
2212  * anything else, error
2213  */
2214 static int __devinit smc_drv_probe(struct platform_device *pdev)
2215 {
2216  struct smc91x_platdata *pd = pdev->dev.platform_data;
2217  struct smc_local *lp;
2218  struct net_device *ndev;
2219  struct resource *res, *ires;
2220  unsigned int __iomem *addr;
2221  unsigned long irq_flags = SMC_IRQ_FLAGS;
2222  int ret;
2223 
2224  ndev = alloc_etherdev(sizeof(struct smc_local));
2225  if (!ndev) {
2226  ret = -ENOMEM;
2227  goto out;
2228  }
2229  SET_NETDEV_DEV(ndev, &pdev->dev);
2230 
2231  /* get configuration from platform data, only allow use of
2232  * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set.
2233  */
2234 
2235  lp = netdev_priv(ndev);
2236 
2237  if (pd) {
2238  memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2239  lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
2240  } else {
2241  lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
2242  lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
2243  lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
2244  lp->cfg.flags |= (nowait) ? SMC91X_NOWAIT : 0;
2245  }
2246 
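 /*
  * If the platform data did not choose LED functions, fall back to the
  * driver defaults for LED A/B (the RPC register LED select fields).
  */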
2247  if (!lp->cfg.leda && !lp->cfg.ledb) {
2248  lp->cfg.leda = RPC_LSA_DEFAULT;
2249  lp->cfg.ledb = RPC_LSB_DEFAULT;
2250  }
2251 
2252  ndev->dma = (unsigned char)-1;
2253 
2254  res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2255  if (!res)
2256  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2257  if (!res) {
2258  ret = -ENODEV;
2259  goto out_free_netdev;
2260  }
2261 
2262 
2263  if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
2264  ret = -EBUSY;
2265  goto out_free_netdev;
2266  }
2267 
2268  ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2269  if (!ires) {
2270  ret = -ENODEV;
2271  goto out_release_io;
2272  }
2273 
2274  ndev->irq = ires->start;
2275 
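 /*
  * Honour an IRQ trigger type supplied with the IRQ resource: if the
  * platform did not hard-code flags (SMC_IRQ_FLAGS == -1), or the
  * resource carries IRQF_TRIGGER_* bits, take the trigger from the
  * resource.
  */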
2276  if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2277  irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2278 
2279  ret = smc_request_attrib(pdev, ndev);
2280  if (ret)
2281  goto out_release_io;
2282 #if defined(CONFIG_SA1100_ASSABET)
2283  neponset_ncr_set(NCR_ENET_OSC_EN);
2284 #endif
2285  platform_set_drvdata(pdev, ndev);
2286  ret = smc_enable_device(pdev);
2287  if (ret)
2288  goto out_release_attrib;
2289 
2290  addr = ioremap(res->start, SMC_IO_EXTENT);
2291  if (!addr) {
2292  ret = -ENOMEM;
2293  goto out_release_attrib;
2294  }
2295 
2296 #ifdef CONFIG_ARCH_PXA
2297  {
2298  struct smc_local *lp = netdev_priv(ndev);
2299  lp->device = &pdev->dev;
2300  lp->physaddr = res->start;
2301  }
2302 #endif
2303 
2304  ret = smc_probe(ndev, addr, irq_flags);
2305  if (ret != 0)
2306  goto out_iounmap;
2307 
2308  smc_request_datacs(pdev, ndev);
2309 
2310  return 0;
2311 
2312  out_iounmap:
2313  platform_set_drvdata(pdev, NULL);
2314  iounmap(addr);
2315  out_release_attrib:
2316  smc_release_attrib(pdev, ndev);
2317  out_release_io:
2318  release_mem_region(res->start, SMC_IO_EXTENT);
2319  out_free_netdev:
2320  free_netdev(ndev);
2321  out:
2322  printk("%s: not found (%d).\n", CARDNAME, ret);
2323 
2324  return ret;
2325 }
2326 
2327 static int __devexit smc_drv_remove(struct platform_device *pdev)
2328 {
2329  struct net_device *ndev = platform_get_drvdata(pdev);
2330  struct smc_local *lp = netdev_priv(ndev);
2331  struct resource *res;
2332 
2333  platform_set_drvdata(pdev, NULL);
2334 
2335  unregister_netdev(ndev);
2336 
2337  free_irq(ndev->irq, ndev);
2338 
2339 #ifdef CONFIG_ARCH_PXA
2340  if (ndev->dma != (unsigned char)-1)
2341  pxa_free_dma(ndev->dma);
2342 #endif
2343  iounmap(lp->base);
2344 
2345  smc_release_datacs(pdev, ndev);
2346  smc_release_attrib(pdev, ndev);
2347 
2348  res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
2349  if (!res)
2350  res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2351  release_mem_region(res->start, SMC_IO_EXTENT);
2352 
2353  free_netdev(ndev);
2354 
2355  return 0;
2356 }
2357 
2358 static int smc_drv_suspend(struct device *dev)
2359 {
2360  struct platform_device *pdev = to_platform_device(dev);
2361  struct net_device *ndev = platform_get_drvdata(pdev);
2362 
2363  if (ndev) {
2364  if (netif_running(ndev)) {
2365  netif_device_detach(ndev);
2366  smc_shutdown(ndev);
2367  smc_phy_powerdown(ndev);
2368  }
2369  }
2370  return 0;
2371 }
2372 
2373 static int smc_drv_resume(struct device *dev)
2374 {
2375  struct platform_device *pdev = to_platform_device(dev);
2376  struct net_device *ndev = platform_get_drvdata(pdev);
2377 
2378  if (ndev) {
2379  struct smc_local *lp = netdev_priv(ndev);
2380  smc_enable_device(pdev);
2381  if (netif_running(ndev)) {
2382  smc_reset(ndev);
2383  smc_enable(ndev);
2384  if (lp->phy_type != 0)
2385  smc_phy_configure(&lp->phy_configure);
2386  netif_device_attach(ndev);
2387  }
2388  }
2389  return 0;
2390 }
2391 
2392 #ifdef CONFIG_OF
2393 static const struct of_device_id smc91x_match[] = {
2394  { .compatible = "smsc,lan91c94", },
2395  { .compatible = "smsc,lan91c111", },
2396  {},
2397 };
2398 MODULE_DEVICE_TABLE(of, smc91x_match);
2399 #else
2400 #define smc91x_match NULL
2401 #endif
2402 
2403 static struct dev_pm_ops smc_drv_pm_ops = {
2404  .suspend = smc_drv_suspend,
2405  .resume = smc_drv_resume,
2406 };
2407 
2408 static struct platform_driver smc_driver = {
2409  .probe = smc_drv_probe,
2410  .remove = __devexit_p(smc_drv_remove),
2411  .driver = {
2412  .name = CARDNAME,
2413  .owner = THIS_MODULE,
2414  .pm = &smc_drv_pm_ops,
2415  .of_match_table = smc91x_match,
2416  },
2417 };
2418 
2419 module_platform_driver(smc_driver);
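/*
 * For reference only (not part of this file): a non-DT board could
 * register a matching platform device roughly as sketched below. The
 * base address, region size and IRQ number are placeholders for
 * whatever the board actually wires up, and 16-bit access also
 * requires SMC_CAN_USE_16BIT in the platform's smc91x.h.
 *
 *	#include <linux/ioport.h>
 *	#include <linux/platform_device.h>
 *	#include <linux/smc91x.h>
 *
 *	static struct smc91x_platdata board_smc91x_pdata = {
 *		.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
 *	};
 *
 *	static struct resource board_smc91x_resources[] = {
 *		DEFINE_RES_MEM_NAMED(0x20000000, 0x100, "smc91x-regs"),
 *		DEFINE_RES_IRQ(42),
 *	};
 *
 *	static struct platform_device board_smc91x_device = {
 *		.name		= "smc91x",
 *		.id		= -1,
 *		.dev		= { .platform_data = &board_smc91x_pdata, },
 *		.resource	= board_smc91x_resources,
 *		.num_resources	= ARRAY_SIZE(board_smc91x_resources),
 *	};
 *
 * DT-based platforms instead describe the device with one of the
 * compatible strings listed in smc91x_match above.
 */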