Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
lmc_main.c
Go to the documentation of this file.
1  /*
2  * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3  * All rights reserved. www.lanmedia.com
4  * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <[email protected]>
5  *
6  * This code is written by:
7  * Andrew Stanley-Jones ([email protected])
8  * Rob Braun ([email protected]),
9  * Michael Graff ([email protected]) and
10  * Matt Thomas ([email protected]).
11  *
12  * With Help By:
13  * David Boggs
14  * Ron Crane
15  * Alan Cox
16  *
17  * This software may be used and distributed according to the terms
18  * of the GNU General Public License version 2, incorporated herein by reference.
19  *
20  * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
21  *
22  * To control link specific options lmcctl is required.
23  * It can be obtained from ftp.lanmedia.com.
24  *
25  * Linux driver notes:
26  * Linux uses the device struct lmc_private to pass private information
27  * around.
28  *
29  * The initialization portion of this driver consists of the lmc_reset() and
30  * lmc_dec_reset() functions, as well as the LED controls and the
31  * lmc_initcsrs() function.
32  *
33  * The watchdog function runs every second and checks to see if
34  * we still have link, and that the timing source is what we expected
35  * it to be. If link is lost, the interface is marked down, and
36  * we no longer can transmit.
37  *
38  */
39 
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/string.h>
43 #include <linux/timer.h>
44 #include <linux/ptrace.h>
45 #include <linux/errno.h>
46 #include <linux/ioport.h>
47 #include <linux/slab.h>
48 #include <linux/interrupt.h>
49 #include <linux/pci.h>
50 #include <linux/delay.h>
51 #include <linux/hdlc.h>
52 #include <linux/init.h>
53 #include <linux/in.h>
54 #include <linux/if_arp.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
58 #include <linux/inet.h>
59 #include <linux/bitops.h>
60 #include <asm/processor.h> /* Processor type for cache alignment. */
61 #include <asm/io.h>
62 #include <asm/dma.h>
63 #include <asm/uaccess.h>
64 //#include <asm/spinlock.h>
65 
66 #define DRIVER_MAJOR_VERSION 1
67 #define DRIVER_MINOR_VERSION 34
68 #define DRIVER_SUB_VERSION 0
69 
70 #define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
71 
72 #include "lmc.h"
73 #include "lmc_var.h"
74 #include "lmc_ioctl.h"
75 #include "lmc_debug.h"
76 #include "lmc_proto.h"
77 
/* Maximum frame size reported via LMCIOCGETXINFO and used when sizing
 * receive buffers; never modified at runtime, so const-qualify it. */
static const int LMC_PKT_BUF_SZ = 1542;
79 
/* PCI IDs this driver binds to.
 * NOTE(review): the device-id entries that preceded the terminator were
 * lost in extraction — restore them from the pristine lmc_main.c. */
static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
	{ 0 }
};
87 
88 MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
89 MODULE_LICENSE("GPL v2");
90 
91 
92 static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
93  struct net_device *dev);
94 static int lmc_rx (struct net_device *dev);
95 static int lmc_open(struct net_device *dev);
96 static int lmc_close(struct net_device *dev);
97 static struct net_device_stats *lmc_get_stats(struct net_device *dev);
98 static irqreturn_t lmc_interrupt(int irq, void *dev_instance);
99 static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
100 static void lmc_softreset(lmc_softc_t * const);
101 static void lmc_running_reset(struct net_device *dev);
102 static int lmc_ifdown(struct net_device * const);
103 static void lmc_watchdog(unsigned long data);
104 static void lmc_reset(lmc_softc_t * const sc);
105 static void lmc_dec_reset(lmc_softc_t * const sc);
106 static void lmc_driver_timeout(struct net_device *dev);
107 
/*
 * linux reserves 16 device specific IOCTLs. We call them
 * LMCIOC* to control various bits of our world.
 *
 * lmc_ioctl() - device-private ioctl dispatcher.
 * @dev: the net device the ioctl targets
 * @ifr: ifr->ifr_data carries the user buffer for each command
 * @cmd: the LMCIOC* command code
 *
 * Returns 0 on success, a negative errno on failure, or whatever
 * lmc_proto_ioctl() returns for commands not handled here.
 *
 * NOTE(review): several statements in this extraction were lost (each
 * elision is marked below); compare against the pristine lmc_main.c
 * before building.
 */
int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
{
	lmc_softc_t *sc = dev_to_sc(dev);
	lmc_ctl_t ctl;
	int ret = -EOPNOTSUPP;	/* default for commands we don't recognize */
	u16 regVal;
	unsigned long flags;

	lmc_trace(dev, "lmc_ioctl in");

	/*
	 * Most functions mess with the structure
	 * Disable interrupts while we do the polling
	 */

	switch (cmd) {
	/*
	 * Return current driver state. Since we keep this up
	 * To date internally, just copy this out to the user.
	 */
	case LMCIOCGINFO: /*fold01*/
		if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof(lmc_ctl_t)))
			ret = -EFAULT;
		else
			ret = 0;
		break;

	case LMCIOCSINFO: /*fold01*/
		/* Setting driver state requires admin and a downed interface */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		if(dev->flags & IFF_UP){
			ret = -EBUSY;
			break;
		}

		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
			ret = -EFAULT;
			break;
		}

		spin_lock_irqsave(&sc->lmc_lock, flags);
		sc->lmc_media->set_status (sc, &ctl);

		/* NOTE(review): the body of this if/else was truncated in
		 * extraction — the else branch and part of the CRC-length
		 * bookkeeping are missing, leaving broken syntax here. */
		if(ctl.crc_length != sc->ictl.crc_length) {
			sc->lmc_media->set_crc_length(sc, ctl.crc_length);
		else
		}
		spin_unlock_irqrestore(&sc->lmc_lock, flags);

		ret = 0;
		break;

	case LMCIOCIFTYPE: /*fold01*/
		{
		u16 old_type = sc->if_type;
		u16 new_type;

		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) {
			ret = -EFAULT;
			break;
		}


		if (new_type == old_type)
		{
			ret = 0 ;
			break; /* no change */
		}

		/* Tear down the old protocol, attach and open the new one */
		spin_lock_irqsave(&sc->lmc_lock, flags);
		lmc_proto_close(sc);

		sc->if_type = new_type;
		lmc_proto_attach(sc);
		ret = lmc_proto_open(sc);
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		break;
		}

	case LMCIOCGETXINFO: /*fold01*/
		spin_lock_irqsave(&sc->lmc_lock, flags);
		/* Magic0/Magic1 bracket the struct so the user tool can
		 * sanity-check the copy */
		sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;

		sc->lmc_xinfo.PciSlotNumber = 0;
		/* NOTE(review): the assignments that preceded this orphaned
		 * expression were lost in extraction; only the trailing MII
		 * read remains. */
			lmc_mii_readreg (sc, 0, 3) & 0xf;
		sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
		sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
		spin_unlock_irqrestore(&sc->lmc_lock, flags);

		sc->lmc_xinfo.Magic1 = 0xDEADBEEF;

		if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
				 sizeof(struct lmc_xinfo)))
			ret = -EFAULT;
		else
			ret = 0;

		break;

	case LMCIOCGETLMCSTATS:
		spin_lock_irqsave(&sc->lmc_lock, flags);
		/* T1 cards keep error counters in the framer; fold them into
		 * extra_stats before copying out */
		if (sc->lmc_cardtype == LMC_CARDTYPE_T1) {
			/* NOTE(review): the lmc_mii_writereg() calls that
			 * select each framer counter register were lost in
			 * extraction; only the readbacks remain. */
			sc->extra_stats.framingBitErrorCount +=
				lmc_mii_readreg(sc, 0, 18) & 0xff;
			sc->extra_stats.framingBitErrorCount +=
				(lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
			sc->extra_stats.lineCodeViolationCount +=
				lmc_mii_readreg(sc, 0, 18) & 0xff;
			sc->extra_stats.lineCodeViolationCount +=
				(lmc_mii_readreg(sc, 0, 18) & 0xff) << 8;
			lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR);
			regVal = lmc_mii_readreg(sc, 0, 18) & 0xff;

			sc->extra_stats.lossOfFrameCount +=
				(regVal & T1FRAMER_LOF_MASK) >> 4;
			sc->extra_stats.changeOfFrameAlignmentCount +=
				(regVal & T1FRAMER_COFA_MASK) >> 2;
			sc->extra_stats.severelyErroredFrameCount +=
				regVal & T1FRAMER_SEF_MASK;
		}
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		/* User buffer layout: generic stats followed by extra_stats */
		if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats,
				 sizeof(sc->lmc_device->stats)) ||
		    copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats),
				 &sc->extra_stats, sizeof(sc->extra_stats)))
			ret = -EFAULT;
		else
			ret = 0;
		break;

	case LMCIOCCLEARLMCSTATS:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		spin_lock_irqsave(&sc->lmc_lock, flags);
		memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats));
		memset(&sc->extra_stats, 0, sizeof(sc->extra_stats));
		/* re-seed the fields the user tool uses to validate the blob */
		sc->extra_stats.check = STATCHECK;
		sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
			sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
		sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		ret = 0;
		break;

	case LMCIOCSETCIRCUIT: /*fold01*/
		if (!capable(CAP_NET_ADMIN)){
			ret = -EPERM;
			break;
		}

		if(dev->flags & IFF_UP){
			ret = -EBUSY;
			break;
		}

		if (copy_from_user(&ctl, ifr->ifr_data, sizeof(lmc_ctl_t))) {
			ret = -EFAULT;
			break;
		}
		spin_lock_irqsave(&sc->lmc_lock, flags);
		/* NOTE(review): the media-layer circuit-type call that
		 * preceded this assignment was lost in extraction. */
		sc->ictl.circuit_type = ctl.circuit_type;
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		ret = 0;

		break;

	case LMCIOCRESET: /*fold01*/
		if (!capable(CAP_NET_ADMIN)){
			ret = -EPERM;
			break;
		}

		spin_lock_irqsave(&sc->lmc_lock, flags);
		/* Reset driver and bring back to current state */
		printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
		lmc_running_reset (dev);
		printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));

		LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
		spin_unlock_irqrestore(&sc->lmc_lock, flags);

		ret = 0;
		break;

#ifdef DEBUG
	case LMCIOCDUMPEVENTLOG:
		/* Copy out the event-log index followed by the log buffer */
		if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof(u32))) {
			ret = -EFAULT;
			break;
		}
		if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf,
				 sizeof(lmcEventLogBuf)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
#endif /* end ifdef _DBG_EVENTLOG */
	case LMCIOCT1CONTROL: /*fold01*/
		/* Only meaningful on T1 hardware */
		if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
			ret = -EOPNOTSUPP;
			break;
		}
		break;
	case LMCIOCXILINX: /*fold01*/
		{
		struct lmc_xilinx_control xc; /*fold02*/

		if (!capable(CAP_NET_ADMIN)){
			ret = -EPERM;
			break;
		}

		/*
		 * Stop the transmitter while we restart the hardware
		 */
		netif_stop_queue(dev);

		if (copy_from_user(&xc, ifr->ifr_data, sizeof(struct lmc_xilinx_control))) {
			ret = -EFAULT;
			break;
		}
		switch(xc.command){
		case lmc_xilinx_reset: /*fold02*/
		{
			u16 mii;
			spin_lock_irqsave(&sc->lmc_lock, flags);
			mii = lmc_mii_readreg (sc, 0, 16);

			/*
			 * Make all of them 0 and make input
			 */
			lmc_gpio_mkinput(sc, 0xff);

			/*
			 * make the reset output
			 * NOTE(review): the lmc_gpio_mkoutput() call was lost
			 * in extraction.
			 */

			/*
			 * RESET low to force configuration. This also forces
			 * the transmitter clock to be internal, but we expect to reset
			 * that later anyway.
			 */

			sc->lmc_gpio &= ~LMC_GEP_RESET;
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


			/*
			 * hold for more than 10 microseconds
			 */
			udelay(50);

			sc->lmc_gpio |= LMC_GEP_RESET;
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


			/*
			 * stop driving Xilinx-related signals
			 */
			lmc_gpio_mkinput(sc, 0xff);

			/* Reset the framer hardware */
			sc->lmc_media->set_link_status (sc, 1);
			sc->lmc_media->set_status (sc, NULL);
//			lmc_softreset(sc);

			{
				int i;
				/* Blink the LEDs to signal the reset.
				 * NOTE(review): the LED on/off calls between
				 * these delays were lost in extraction. */
				for(i = 0; i < 5; i++){
					mdelay(100);
					mdelay(100);
					mdelay(100);
					mdelay(100);
				}
			}
			spin_unlock_irqrestore(&sc->lmc_lock, flags);



			ret = 0x0;

		}

		break;
		case lmc_xilinx_load_prom: /*fold02*/
		{
			u16 mii;
			int timeout = 500000;
			spin_lock_irqsave(&sc->lmc_lock, flags);
			mii = lmc_mii_readreg (sc, 0, 16);

			/*
			 * Make all of them 0 and make input
			 */
			lmc_gpio_mkinput(sc, 0xff);

			/*
			 * make the reset output
			 * NOTE(review): the lmc_gpio_mkoutput() call was lost
			 * in extraction.
			 */

			/*
			 * RESET low to force configuration. This also forces
			 * the transmitter clock to be internal, but we expect to reset
			 * that later anyway.
			 */

			sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);


			/*
			 * hold for more than 10 microseconds
			 */
			udelay(50);

			/* NOTE(review): the GPIO set that releases RESET/DP
			 * was lost in extraction. */
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

			/*
			 * busy wait for the chip to reset
			 */
			while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
			       (timeout-- > 0))
				cpu_relax();


			/*
			 * stop driving Xilinx-related signals
			 */
			lmc_gpio_mkinput(sc, 0xff);
			spin_unlock_irqrestore(&sc->lmc_lock, flags);

			ret = 0x0;


			break;

		}

		case lmc_xilinx_load: /*fold02*/
		{
			char *data;
			int pos;
			int timeout = 500000;

			if (!xc.data) {
				ret = -EINVAL;
				break;
			}

			/* Copy the user-supplied bitstream into kernel memory */
			data = kmalloc(xc.len, GFP_KERNEL);
			if (!data) {
				ret = -ENOMEM;
				break;
			}

			if(copy_from_user(data, xc.data, xc.len))
			{
				kfree(data);
				ret = -ENOMEM;
				break;
			}

			printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);

			spin_lock_irqsave(&sc->lmc_lock, flags);
			lmc_gpio_mkinput(sc, 0xff);

			/*
			 * Clear the Xilinx and start programming from the DEC
			 */

			/*
			 * Set output as:
			 * Reset: 0 (active)
			 * DP: 0 (active)
			 * Mode: 1
			 *
			 */
			sc->lmc_gpio = 0x00;
			sc->lmc_gpio &= ~LMC_GEP_DP;
			sc->lmc_gpio &= ~LMC_GEP_RESET;
			sc->lmc_gpio |= LMC_GEP_MODE;
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

			/* NOTE(review): the lmc_gpio_mkoutput() call was lost
			 * in extraction. */

			/*
			 * Wait at least 10 us 20 to be safe
			 */
			udelay(50);

			/*
			 * Clear reset and activate programming lines
			 * Reset: Input
			 * DP: Input
			 * Clock: Output
			 * Data: Output
			 * Mode: Output
			 * NOTE(review): the mkinput/mkoutput calls that
			 * realize this were lost in extraction.
			 */

			/*
			 * Set LOAD, DATA, Clock to 1
			 */
			sc->lmc_gpio = 0x00;
			sc->lmc_gpio |= LMC_GEP_MODE;
			sc->lmc_gpio |= LMC_GEP_DATA;
			sc->lmc_gpio |= LMC_GEP_CLK;
			LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);

			/* NOTE(review): the lmc_gpio_mkoutput() call was lost
			 * in extraction. */

			/*
			 * busy wait for the chip to reset
			 */
			while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
			       (timeout-- > 0))
				cpu_relax();

			printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear it's memory\n", dev->name, 500000-timeout);

			/* Bit-bang each 0/1 of the bitstream into the Xilinx:
			 * set DATA, pulse CLK low then high */
			for(pos = 0; pos < xc.len; pos++){
				switch(data[pos]){
				case 0:
					sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
					break;
				case 1:
					sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
					break;
				default:
					printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
					sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
				}
				sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
				sc->lmc_gpio |= LMC_GEP_MODE;
				LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
				udelay(1);

				sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
				sc->lmc_gpio |= LMC_GEP_MODE;
				LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
				udelay(1);
			}
			if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
				printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
			}
			else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
				printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
			}
			else {
				printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
			}

			lmc_gpio_mkinput(sc, 0xff);

			/* NOTE(review): the miireg16 updates between these two
			 * identical writes were lost in extraction. */
			lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);

			lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
			spin_unlock_irqrestore(&sc->lmc_lock, flags);

			kfree(data);

			ret = 0;

			break;
		}
		default: /*fold02*/
			ret = -EBADE;
			break;
		}

		/* Restart transmission now that the hardware is back */
		netif_wake_queue(dev);
		sc->lmc_txfull = 0;

		}
		break;
	default: /*fold01*/
		/* If we don't know what to do, give the protocol a shot. */
		ret = lmc_proto_ioctl (sc, ifr, cmd);
		break;
	}

	lmc_trace(dev, "lmc_ioctl out");

	return ret;
}
637 
638 
/* the watchdog process that cruises around
 *
 * Runs once a second (re-armed at the bottom).  Verifies the private
 * struct canary, keeps the DEC chip's TX/RX processes running, detects
 * stuck transmits (and resets the chip), and mirrors the hardware link
 * state into the net_device carrier state.
 *
 * NOTE(review): a few statements were lost in extraction; each elision
 * is marked below.
 */
static void lmc_watchdog (unsigned long data) /*fold00*/
{
	struct net_device *dev = (struct net_device *)data;
	lmc_softc_t *sc = dev_to_sc(dev);
	int link_status;
	u32 ticks;
	unsigned long flags;

	lmc_trace(dev, "lmc_watchdog in");

	spin_lock_irqsave(&sc->lmc_lock, flags);

	/* sc->check is a canary written at probe time; bail if clobbered */
	if(sc->check != 0xBEAFCAFE){
		printk("LMC: Corrupt net_device struct, breaking out\n");
		spin_unlock_irqrestore(&sc->lmc_lock, flags);
		return;
	}


	/* Make sure the tx jabber and rx watchdog are off,
	 * and the transmit and receive processes are running.
	 */

	LMC_CSR_WRITE (sc, csr_15, 0x00000011);
	/* NOTE(review): the lmc_cmdmode update between these two writes was
	 * lost in extraction. */
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	/* Interface not fully up yet: just re-arm the timer */
	if (sc->lmc_ok == 0)
		goto kick_timer;

	LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));

	/* --- begin time out check -----------------------------------
	 * check for a transmit interrupt timeout
	 * Has the packet xmt vs xmt serviced threshold been exceeded */
	if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
	    sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
	    sc->tx_TimeoutInd == 0)
	{

		/* wait for the watchdog to come around again */
		sc->tx_TimeoutInd = 1;
	}
	else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
		 sc->lmc_device->stats.tx_packets > sc->lasttx_packets &&
		 sc->tx_TimeoutInd)
	{
		/* Second consecutive tick with no serviced transmits:
		 * declare the DEC chip stuck */

		LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);

		sc->tx_TimeoutDisplay = 1;
		sc->extra_stats.tx_TimeoutCnt++;

		/* DEC chip is stuck, hit it with a RESET!!!! */
		lmc_running_reset (dev);


		/* look at receive & transmit process state to make sure they are running */
		LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);

		/* look at: DSR - 02 for Reg 16
		 *	    CTS - 08
		 *	    DCD - 10
		 *	    RI  - 20
		 * for Reg 17
		 * NOTE(review): the LMC_EVENT_LOG call that sampled these
		 * registers was lost in extraction.
		 */

		/* reset the transmit timeout detection flag */
		sc->tx_TimeoutInd = 0;
		sc->lastlmc_taint_tx = sc->lmc_taint_tx;
		sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
	} else {
		sc->tx_TimeoutInd = 0;
		sc->lastlmc_taint_tx = sc->lmc_taint_tx;
		sc->lasttx_packets = sc->lmc_device->stats.tx_packets;
	}

	/* --- end time out check ----------------------------------- */


	link_status = sc->lmc_media->get_link_status (sc);

	/*
	 * hardware level link lost, but the interface is marked as up.
	 * Mark it as down.
	 */
	if ((link_status == 0) && (sc->last_link_status != 0)) {
		printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
		sc->last_link_status = 0;
		/* lmc_reset (sc); Why reset??? The link can go down ok */

		/* Inform the world that link has been lost */
		netif_carrier_off(dev);
	}

	/*
	 * hardware link is up, but the interface is marked as down.
	 * Bring it back up again.
	 */
	if (link_status != 0 && sc->last_link_status == 0) {
		printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
		sc->last_link_status = 1;
		/* lmc_reset (sc); Again why reset??? */

		netif_carrier_on(dev);
	}

	/* Call media specific watchdog functions */
	sc->lmc_media->watchdog(sc);

	/*
	 * Poke the transmitter to make sure it
	 * never stops, even if we run out of mem
	 */
	LMC_CSR_WRITE(sc, csr_rxpoll, 0);

	/*
	 * Check for code that failed
	 * and try and fix it as appropriate
	 */
	if(sc->failed_ring == 1){
		/*
		 * Failed to setup the recv/xmit ring
		 * Try again
		 */
		sc->failed_ring = 0;
		lmc_softreset(sc);
	}
	if(sc->failed_recv_alloc == 1){
		/*
		 * We failed to alloc mem in the
		 * interrupt handler, go through the rings
		 * and rebuild them
		 */
		sc->failed_recv_alloc = 0;
		lmc_softreset(sc);
	}


	/*
	 * remember the timer value
	 */
kick_timer:

	ticks = LMC_CSR_READ (sc, csr_gp_timer);
	LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
	sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);

	/*
	 * restart this timer.
	 */
	sc->timer.expires = jiffies + (HZ);
	add_timer (&sc->timer);

	spin_unlock_irqrestore(&sc->lmc_lock, flags);

	lmc_trace(dev, "lmc_watchdog out");

}
800 
801 static int lmc_attach(struct net_device *dev, unsigned short encoding,
802  unsigned short parity)
803 {
804  if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
805  return 0;
806  return -EINVAL;
807 }
808 
/* net_device callbacks.  MTU changes and the xmit entry point are
 * delegated to the generic HDLC layer; open/close, ioctl, tx-timeout and
 * stats are implemented in this file. */
static const struct net_device_ops lmc_ops = {
	.ndo_open       = lmc_open,
	.ndo_stop       = lmc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = lmc_ioctl,
	.ndo_tx_timeout = lmc_driver_timeout,
	.ndo_get_stats  = lmc_get_stats,
};
818 
/*
 * PCI probe: enable the device, claim its regions, allocate the private
 * softc and the HDLC net_device, register it, identify the card variant
 * from the PCI subsystem ID, and initialize CSRs/GPIO/media defaults.
 * Returns 0 on success or a negative errno, unwinding via the goto
 * cleanup chain at the bottom.
 *
 * NOTE(review): several lines (cardtype setup and the switch case
 * labels) were lost in extraction; marked below.
 */
static int __devinit lmc_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	lmc_softc_t *sc;
	struct net_device *dev;
	u16 subdevice;
	u16 AdapModelNum;
	int err;
	static int cards_found;	/* monotonically increasing board index */

	/* lmc_trace(dev, "lmc_init_one in"); */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
		return err;
	}

	err = pci_request_regions(pdev, "lmc");
	if (err) {
		printk(KERN_ERR "lmc: pci_request_region failed\n");
		goto err_req_io;
	}

	/*
	 * Allocate our own device structure
	 */
	sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL);
	if (!sc) {
		err = -ENOMEM;
		goto err_kzalloc;
	}

	dev = alloc_hdlcdev(sc);
	if (!dev) {
		printk(KERN_ERR "lmc:alloc_netdev for device failed\n");
		goto err_hdlcdev;
	}


	dev->type = ARPHRD_HDLC;
	dev_to_hdlc(dev)->xmit = lmc_start_xmit;
	dev_to_hdlc(dev)->attach = lmc_attach;
	dev->netdev_ops = &lmc_ops;
	dev->watchdog_timeo = HZ; /* 1 second */
	dev->tx_queue_len = 100;
	sc->lmc_device = dev;
	sc->name = dev->name;
	sc->if_type = LMC_PPP;
	sc->check = 0xBEAFCAFE;	/* canary checked by lmc_watchdog() */
	dev->base_addr = pci_resource_start(pdev, 0);
	dev->irq = pdev->irq;
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*
	 * This will get the protocol layer ready and do any 1 time init's
	 * Must have a valid sc and dev structure
	 */
	lmc_proto_attach(sc);

	/* Init the spin lock so can call it latter */

	spin_lock_init(&sc->lmc_lock);
	pci_set_master(pdev);

	printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name,
	       dev->base_addr, dev->irq);

	err = register_hdlc_device(dev);
	if (err) {
		printk(KERN_ERR "%s: register_netdev failed.\n", dev->name);
		free_netdev(dev);
		goto err_hdlcdev;
	}

	/* NOTE(review): the cardtype initialisation that followed
	 * registration was lost in extraction. */

	/*
	 *
	 * Check either the subvendor or the subdevice, some systems reverse
	 * the setting in the bios, seems to be version and arch dependent?
	 * Fix the error, exchange the two values
	 */
	if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
		subdevice = pdev->subsystem_vendor;

	/* NOTE(review): the case labels (the PCI_DEVICE_ID_LMC_* IDs) and
	 * the sc->lmc_cardtype assignments in this switch were lost in
	 * extraction, leaving the branches unlabeled. */
	switch (subdevice) {
		printk(KERN_INFO "%s: LMC HSSI\n", dev->name);
		sc->lmc_media = &lmc_hssi_media;
		break;
		printk(KERN_INFO "%s: LMC DS3\n", dev->name);
		sc->lmc_media = &lmc_ds3_media;
		break;
		printk(KERN_INFO "%s: LMC SSI\n", dev->name);
		sc->lmc_media = &lmc_ssi_media;
		break;
		printk(KERN_INFO "%s: LMC T1\n", dev->name);
		sc->lmc_media = &lmc_t1_media;
		break;
	default:
		printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
		break;
	}

	lmc_initcsrs (sc, dev->base_addr, 8);

	lmc_gpio_mkinput (sc, 0xff);
	sc->lmc_gpio = 0; /* drive no signals yet */

	sc->lmc_media->defaults (sc);

	/* NOTE(review): the set_link_status call here was lost in
	 * extraction. */

	/* verify that the PCI Sub System ID matches the Adapter Model number
	 * from the MII register
	 */
	AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;

	if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */
	     subdevice != PCI_DEVICE_ID_LMC_T1) &&
	    (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */
	     subdevice != PCI_DEVICE_ID_LMC_SSI) &&
	    (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */
	     subdevice != PCI_DEVICE_ID_LMC_DS3) &&
	    (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */
	     subdevice != PCI_DEVICE_ID_LMC_HSSI))
		printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI"
		       " Subsystem ID = 0x%04x\n",
		       dev->name, AdapModelNum, subdevice);

	/*
	 * reset clock
	 */
	LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);

	sc->board_idx = cards_found++;
	sc->extra_stats.check = STATCHECK;
	sc->extra_stats.version_size = (DRIVER_VERSION << 16) +
		sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats);
	sc->extra_stats.lmc_cardtype = sc->lmc_cardtype;

	sc->lmc_ok = 0;
	sc->last_link_status = 0;

	lmc_trace(dev, "lmc_init_one out");
	return 0;

err_hdlcdev:
	pci_set_drvdata(pdev, NULL);
	kfree(sc);
err_kzalloc:
	pci_release_regions(pdev);
err_req_io:
	pci_disable_device(pdev);
	return err;
}
985 
/*
 * Called from pci when removing module.
 * Releases everything lmc_init_one() acquired, in reverse order.
 */
static void __devexit lmc_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		printk(KERN_DEBUG "%s: removing...\n", dev->name);
		/* NOTE(review): the unregister_hdlc_device() call that
		 * preceded free_netdev() was lost in extraction. */
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
1002 
/* After this is called, packets can be sent.
 * Does not initialize the addresses
 *
 * ndo_open: resets the DEC chip and the card, sets up rings, grabs the
 * shared IRQ, opens the protocol layer, enables interrupts and starts
 * the one-second watchdog timer.  Returns 0 or a negative errno.
 *
 * NOTE(review): several lines (an event-log call, the descriptor-control
 * flag list, the interrupt-mask bit list and cmdmode setup) were lost in
 * extraction; marked below.
 */
static int lmc_open(struct net_device *dev)
{
	lmc_softc_t *sc = dev_to_sc(dev);
	int err;

	lmc_trace(dev, "lmc_open in");

	lmc_led_on(sc, LMC_DS3_LED0);

	lmc_dec_reset(sc);
	lmc_reset(sc);

	LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0);
	/* NOTE(review): the opening of the second LMC_EVENT_LOG(...) call
	 * was lost in extraction; only its trailing argument survives. */
		      lmc_mii_readreg(sc, 0, 17));

	if (sc->lmc_ok){
		lmc_trace(dev, "lmc_open lmc_ok out");
		return 0;
	}

	lmc_softreset (sc);

	/* Since we have to use PCI bus, this should work on x86,alpha,ppc */
	if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
		printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
		lmc_trace(dev, "lmc_open irq failed out");
		return -EAGAIN;
	}
	sc->got_irq = 1;	/* remembered so lmc_ifdown() frees it */

	/* Assert Terminal Active */
	/* NOTE(review): the miireg16/set_link_status statements were lost in
	 * extraction. */

	/*
	 * reset to last state.
	 */
	sc->lmc_media->set_status (sc, NULL);

	/* setup default bits to be used in tulip_desc_t transmit descriptor
	 * -baz */
	sc->TxDescriptControlInit = (
	/* NOTE(review): the descriptor-control flag list was lost in
	 * extraction. */
	);

	if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
		/* disable 32 bit CRC generated by ASIC */
		/* NOTE(review): the flag update that did this was lost in
		 * extraction. */
	}
	sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
	/* Acknowledge the Terminal Active and light LEDs */

	/* dev->flags |= IFF_UP; */

	if ((err = lmc_proto_open(sc)) != 0)
		return err;

	netif_start_queue(dev);
	sc->extra_stats.tx_tbusy0++;

	/*
	 * select what interrupts we want to get
	 */
	sc->lmc_intrmask = 0;
	/* Should be using the default interrupt mask defined in the .h file. */
	/* NOTE(review): the interrupt-mask bit list was lost in extraction;
	 * the orphaned ')' below closed it. */
	);
	LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

	/* NOTE(review): the lmc_cmdmode setup lines were lost in
	 * extraction. */
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	sc->lmc_ok = 1; /* Run watchdog */

	/*
	 * Set the if up now - pfb
	 */

	sc->last_link_status = 1;

	/*
	 * Setup a timer for the watchdog on probe, and start it running.
	 * Since lmc_ok == 0, it will be a NOP for now.
	 */
	init_timer (&sc->timer);
	sc->timer.expires = jiffies + HZ;
	sc->timer.data = (unsigned long) dev;
	sc->timer.function = lmc_watchdog;
	add_timer (&sc->timer);

	lmc_trace(dev, "lmc_open out");

	return 0;
}
1114 
/* Total reset to compensate for the AdTran DSU doing bad things
 * under heavy load
 *
 * Full chip + card reset performed while the interface stays "up":
 * masks interrupts, resets the DEC chip and the card, rebuilds the
 * rings, restores link/media state and re-enables TX.  Called from the
 * watchdog and from LMCIOCRESET; callers hold sc->lmc_lock.
 */

static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
	lmc_softc_t *sc = dev_to_sc(dev);

	lmc_trace(dev, "lmc_running_reset in");

	/* stop interrupts */
	/* Clear the interrupt mask */
	LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

	lmc_dec_reset (sc);
	lmc_reset (sc);
	lmc_softreset (sc);
	/* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
	sc->lmc_media->set_link_status (sc, 1);
	sc->lmc_media->set_status (sc, NULL);

	netif_wake_queue(dev);

	sc->lmc_txfull = 0;
	sc->extra_stats.tx_tbusy0++;

	/* NOTE(review): the assignment restoring sc->lmc_intrmask was lost
	 * in extraction. */
	LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);

	/* NOTE(review): the lmc_cmdmode TX/RX-run update was lost in
	 * extraction. */
	LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);

	lmc_trace(dev, "lmc_runnin_reset_out");
}
1149 
1150 
/* This is what is called when you ifconfig down a device.
 * This disables the timer for the watchdog and keepalives,
 * and disables the irq for dev.
 *
 * ndo_stop: ordering matters — lmc_ok is cleared first so the watchdog
 * becomes a no-op before its timer is deleted, then the link is dropped,
 * the protocol layer closed, and lmc_ifdown() stops the hardware and
 * frees the rings/IRQ.  Always returns 0.
 */
static int lmc_close(struct net_device *dev)
{
	/* not calling release_region() as we should */
	lmc_softc_t *sc = dev_to_sc(dev);

	lmc_trace(dev, "lmc_close in");

	sc->lmc_ok = 0;
	sc->lmc_media->set_link_status (sc, 0);
	del_timer (&sc->timer);
	lmc_proto_close(sc);
	lmc_ifdown (dev);

	lmc_trace(dev, "lmc_close out");

	return 0;
}
1172 
/* Ends the transfer of packets */
/* When the interface goes down, this is called
 *
 * Stops the queue, masks interrupts, halts the chip's TX/RX engines,
 * accumulates the missed-frame counter, releases the IRQ and frees every
 * skb still held in the RX and TX rings.  Always returns 0.
 */
static int lmc_ifdown (struct net_device *dev) /*fold00*/
{
	lmc_softc_t *sc = dev_to_sc(dev);
	u32 csr6;
	int i;

	lmc_trace(dev, "lmc_ifdown in");

	/* Don't let anything else go on right now */
	// dev->start = 0;
	netif_stop_queue(dev);
	sc->extra_stats.tx_tbusy1++;

	/* stop interrupts */
	/* Clear the interrupt mask */
	LMC_CSR_WRITE (sc, csr_intr, 0x00000000);

	/* Stop Tx and Rx on the chip */
	csr6 = LMC_CSR_READ (sc, csr_command);
	csr6 &= ~LMC_DEC_ST;		/* Turn off the Transmission bit */
	csr6 &= ~LMC_DEC_SR;		/* Turn off the Receive bit */
	LMC_CSR_WRITE (sc, csr_command, csr6);

	sc->lmc_device->stats.rx_missed_errors +=
		LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;

	/* release the interrupt */
	if(sc->got_irq == 1){
		free_irq (dev->irq, dev);
		sc->got_irq = 0;
	}

	/* free skbuffs in the Rx queue */
	for (i = 0; i < LMC_RXDESCS; i++)
	{
		struct sk_buff *skb = sc->lmc_rxq[i];
		sc->lmc_rxq[i] = NULL;
		sc->lmc_rxring[i].status = 0;
		sc->lmc_rxring[i].length = 0;
		/* poison the buffer pointer so stale DMA is obvious */
		sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
		if (skb != NULL)
			dev_kfree_skb(skb);
		sc->lmc_rxq[i] = NULL;
	}

	for (i = 0; i < LMC_TXDESCS; i++)
	{
		if (sc->lmc_txq[i] != NULL)
			dev_kfree_skb(sc->lmc_txq[i]);
		sc->lmc_txq[i] = NULL;
	}

	/* NOTE(review): the lmc_led_off() call here was lost in
	 * extraction. */

	netif_wake_queue(dev);
	sc->extra_stats.tx_tbusy0++;

	lmc_trace(dev, "lmc_ifdown out");

	return 0;
}
1236 
1237 /* Interrupt handling routine. This will take an incoming packet, or clean
1238  * up after a trasmit.
1239  */
1240 static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
1241 {
1242  struct net_device *dev = (struct net_device *) dev_instance;
1243  lmc_softc_t *sc = dev_to_sc(dev);
1244  u32 csr;
1245  int i;
1246  s32 stat;
1247  unsigned int badtx;
1248  u32 firstcsr;
1249  int max_work = LMC_RXDESCS;
1250  int handled = 0;
1251 
1252  lmc_trace(dev, "lmc_interrupt in");
1253 
1254  spin_lock(&sc->lmc_lock);
1255 
1256  /*
1257  * Read the csr to find what interrupts we have (if any)
1258  */
1259  csr = LMC_CSR_READ (sc, csr_status);
1260 
1261  /*
1262  * Make sure this is our interrupt
1263  */
1264  if ( ! (csr & sc->lmc_intrmask)) {
1265  goto lmc_int_fail_out;
1266  }
1267 
1268  firstcsr = csr;
1269 
1270  /* always go through this loop at least once */
1271  while (csr & sc->lmc_intrmask) {
1272  handled = 1;
1273 
1274  /*
1275  * Clear interrupt bits, we handle all case below
1276  */
1277  LMC_CSR_WRITE (sc, csr_status, csr);
1278 
1279  /*
1280  * One of
1281  * - Transmit process timed out CSR5<1>
1282  * - Transmit jabber timeout CSR5<3>
1283  * - Transmit underflow CSR5<5>
1284  * - Transmit Receiver buffer unavailable CSR5<7>
1285  * - Receive process stopped CSR5<8>
1286  * - Receive watchdog timeout CSR5<9>
1287  * - Early transmit interrupt CSR5<10>
1288  *
1289  * Is this really right? Should we do a running reset for jabber?
1290  * (being a WAN card and all)
1291  */
1292  if (csr & TULIP_STS_ABNRMLINTR){
1293  lmc_running_reset (dev);
1294  break;
1295  }
1296 
1297  if (csr & TULIP_STS_RXINTR){
1298  lmc_trace(dev, "rx interrupt");
1299  lmc_rx (dev);
1300 
1301  }
1303 
1304  int n_compl = 0 ;
1305  /* reset the transmit timeout detection flag -baz */
1306  sc->extra_stats.tx_NoCompleteCnt = 0;
1307 
1308  badtx = sc->lmc_taint_tx;
1309  i = badtx % LMC_TXDESCS;
1310 
1311  while ((badtx < sc->lmc_next_tx)) {
1312  stat = sc->lmc_txring[i].status;
1313 
1315  sc->lmc_txring[i].length);
1316  /*
1317  * If bit 31 is 1 the tulip owns it break out of the loop
1318  */
1319  if (stat & 0x80000000)
1320  break;
1321 
1322  n_compl++ ; /* i.e., have an empty slot in ring */
1323  /*
1324  * If we have no skbuff or have cleared it
1325  * Already continue to the next buffer
1326  */
1327  if (sc->lmc_txq[i] == NULL)
1328  continue;
1329 
1330  /*
1331  * Check the total error summary to look for any errors
1332  */
1333  if (stat & 0x8000) {
1334  sc->lmc_device->stats.tx_errors++;
1335  if (stat & 0x4104)
1336  sc->lmc_device->stats.tx_aborted_errors++;
1337  if (stat & 0x0C00)
1338  sc->lmc_device->stats.tx_carrier_errors++;
1339  if (stat & 0x0200)
1340  sc->lmc_device->stats.tx_window_errors++;
1341  if (stat & 0x0002)
1342  sc->lmc_device->stats.tx_fifo_errors++;
1343  } else {
1344  sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1345 
1346  sc->lmc_device->stats.tx_packets++;
1347  }
1348 
1349  // dev_kfree_skb(sc->lmc_txq[i]);
1350  dev_kfree_skb_irq(sc->lmc_txq[i]);
1351  sc->lmc_txq[i] = NULL;
1352 
1353  badtx++;
1354  i = badtx % LMC_TXDESCS;
1355  }
1356 
1357  if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1358  {
1359  printk ("%s: out of sync pointer\n", dev->name);
1360  badtx += LMC_TXDESCS;
1361  }
1362  LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1363  sc->lmc_txfull = 0;
1364  netif_wake_queue(dev);
1365  sc->extra_stats.tx_tbusy0++;
1366 
1367 
1368 #ifdef DEBUG
1369  sc->extra_stats.dirtyTx = badtx;
1370  sc->extra_stats.lmc_next_tx = sc->lmc_next_tx;
1371  sc->extra_stats.lmc_txfull = sc->lmc_txfull;
1372 #endif
1373  sc->lmc_taint_tx = badtx;
1374 
1375  /*
1376  * Why was there a break here???
1377  */
1378  } /* end handle transmit interrupt */
1379 
1380  if (csr & TULIP_STS_SYSERROR) {
1381  u32 error;
1382  printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1383  error = csr>>23 & 0x7;
1384  switch(error){
1385  case 0x000:
1386  printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1387  break;
1388  case 0x001:
1389  printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1390  break;
1391  case 0x010:
1392  printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1393  break;
1394  default:
1395  printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1396  }
1397  lmc_dec_reset (sc);
1398  lmc_reset (sc);
1399  LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1401  lmc_mii_readreg (sc, 0, 16),
1402  lmc_mii_readreg (sc, 0, 17));
1403 
1404  }
1405 
1406 
1407  if(max_work-- <= 0)
1408  break;
1409 
1410  /*
1411  * Get current csr status to make sure
1412  * we've cleared all interrupts
1413  */
1414  csr = LMC_CSR_READ (sc, csr_status);
1415  } /* end interrupt loop */
1416  LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1417 
1418 lmc_int_fail_out:
1419 
1420  spin_unlock(&sc->lmc_lock);
1421 
1422  lmc_trace(dev, "lmc_interrupt out");
1423  return IRQ_RETVAL(handled);
1424 }
1425 
1426 static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
1427  struct net_device *dev)
1428 {
1429  lmc_softc_t *sc = dev_to_sc(dev);
1430  u32 flag;
1431  int entry;
1432  unsigned long flags;
1433 
1434  lmc_trace(dev, "lmc_start_xmit in");
1435 
1436  spin_lock_irqsave(&sc->lmc_lock, flags);
1437 
1438  /* normal path, tbusy known to be zero */
1439 
1440  entry = sc->lmc_next_tx % LMC_TXDESCS;
1441 
1442  sc->lmc_txq[entry] = skb;
1443  sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
1444 
1445  LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
1446 
1447 #ifndef GCOM
1448  /* If the queue is less than half full, don't interrupt */
1449  if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
1450  {
1451  /* Do not interrupt on completion of this packet */
1452  flag = 0x60000000;
1453  netif_wake_queue(dev);
1454  }
1455  else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
1456  {
1457  /* This generates an interrupt on completion of this packet */
1458  flag = 0xe0000000;
1459  netif_wake_queue(dev);
1460  }
1461  else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
1462  {
1463  /* Do not interrupt on completion of this packet */
1464  flag = 0x60000000;
1465  netif_wake_queue(dev);
1466  }
1467  else
1468  {
1469  /* This generates an interrupt on completion of this packet */
1470  flag = 0xe0000000;
1471  sc->lmc_txfull = 1;
1472  netif_stop_queue(dev);
1473  }
1474 #else
1476 
1477  if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1478  { /* ring full, go busy */
1479  sc->lmc_txfull = 1;
1480  netif_stop_queue(dev);
1481  sc->extra_stats.tx_tbusy1++;
1482  LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1483  }
1484 #endif
1485 
1486 
1487  if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
1488  flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
1489 
1490  /* don't pad small packets either */
1491  flag = sc->lmc_txring[entry].length = (skb->len) | flag |
1493 
1494  /* set the transmit timeout flag to be checked in
1495  * the watchdog timer handler. -baz
1496  */
1497 
1498  sc->extra_stats.tx_NoCompleteCnt++;
1499  sc->lmc_next_tx++;
1500 
1501  /* give ownership to the chip */
1502  LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
1503  sc->lmc_txring[entry].status = 0x80000000;
1504 
1505  /* send now! */
1506  LMC_CSR_WRITE (sc, csr_txpoll, 0);
1507 
1508  spin_unlock_irqrestore(&sc->lmc_lock, flags);
1509 
1510  lmc_trace(dev, "lmc_start_xmit_out");
1511  return NETDEV_TX_OK;
1512 }
1513 
1514 
1515 static int lmc_rx(struct net_device *dev)
1516 {
1517  lmc_softc_t *sc = dev_to_sc(dev);
1518  int i;
1519  int rx_work_limit = LMC_RXDESCS;
1520  unsigned int next_rx;
1521  int rxIntLoopCnt; /* debug -baz */
1522  int localLengthErrCnt = 0;
1523  long stat;
1524  struct sk_buff *skb, *nsb;
1525  u16 len;
1526 
1527  lmc_trace(dev, "lmc_rx in");
1528 
1529  lmc_led_on(sc, LMC_DS3_LED3);
1530 
1531  rxIntLoopCnt = 0; /* debug -baz */
1532 
1533  i = sc->lmc_next_rx % LMC_RXDESCS;
1534  next_rx = sc->lmc_next_rx;
1535 
1536  while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
1537  {
1538  rxIntLoopCnt++; /* debug -baz */
1540  if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
1541  if ((stat & 0x0000ffff) != 0x7fff) {
1542  /* Oversized frame */
1543  sc->lmc_device->stats.rx_length_errors++;
1544  goto skip_packet;
1545  }
1546  }
1547 
1548  if (stat & 0x00000008) { /* Catch a dribbling bit error */
1549  sc->lmc_device->stats.rx_errors++;
1550  sc->lmc_device->stats.rx_frame_errors++;
1551  goto skip_packet;
1552  }
1553 
1554 
1555  if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */
1556  sc->lmc_device->stats.rx_errors++;
1557  sc->lmc_device->stats.rx_crc_errors++;
1558  goto skip_packet;
1559  }
1560 
1561  if (len > LMC_PKT_BUF_SZ) {
1562  sc->lmc_device->stats.rx_length_errors++;
1563  localLengthErrCnt++;
1564  goto skip_packet;
1565  }
1566 
1567  if (len < sc->lmc_crcSize + 2) {
1568  sc->lmc_device->stats.rx_length_errors++;
1569  sc->extra_stats.rx_SmallPktCnt++;
1570  localLengthErrCnt++;
1571  goto skip_packet;
1572  }
1573 
1574  if(stat & 0x00004000){
1575  printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
1576  }
1577 
1578  len -= sc->lmc_crcSize;
1579 
1580  skb = sc->lmc_rxq[i];
1581 
1582  /*
1583  * We ran out of memory at some point
1584  * just allocate an skb buff and continue.
1585  */
1586 
1587  if (!skb) {
1588  nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1589  if (nsb) {
1590  sc->lmc_rxq[i] = nsb;
1591  nsb->dev = dev;
1592  sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1593  }
1594  sc->failed_recv_alloc = 1;
1595  goto skip_packet;
1596  }
1597 
1598  sc->lmc_device->stats.rx_packets++;
1599  sc->lmc_device->stats.rx_bytes += len;
1600 
1601  LMC_CONSOLE_LOG("recv", skb->data, len);
1602 
1603  /*
1604  * I'm not sure of the sanity of this
1605  * Packets could be arriving at a constant
1606  * 44.210mbits/sec and we're going to copy
1607  * them into a new buffer??
1608  */
1609 
1610  if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
1611  /*
1612  * If it's a large packet don't copy it just hand it up
1613  */
1614  give_it_anyways:
1615 
1616  sc->lmc_rxq[i] = NULL;
1617  sc->lmc_rxring[i].buffer1 = 0x0;
1618 
1619  skb_put (skb, len);
1620  skb->protocol = lmc_proto_type(sc, skb);
1621  skb_reset_mac_header(skb);
1622  /* skb_reset_network_header(skb); */
1623  skb->dev = dev;
1624  lmc_proto_netif(sc, skb);
1625 
1626  /*
1627  * This skb will be destroyed by the upper layers, make a new one
1628  */
1629  nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1630  if (nsb) {
1631  sc->lmc_rxq[i] = nsb;
1632  nsb->dev = dev;
1633  sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb));
1634  /* Transferred to 21140 below */
1635  }
1636  else {
1637  /*
1638  * We've run out of memory, stop trying to allocate
1639  * memory and exit the interrupt handler
1640  *
1641  * The chip may run out of receivers and stop
1642  * in which care we'll try to allocate the buffer
1643  * again. (once a second)
1644  */
1645  sc->extra_stats.rx_BuffAllocErr++;
1646  LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1647  sc->failed_recv_alloc = 1;
1648  goto skip_out_of_mem;
1649  }
1650  }
1651  else {
1652  nsb = dev_alloc_skb(len);
1653  if(!nsb) {
1654  goto give_it_anyways;
1655  }
1656  skb_copy_from_linear_data(skb, skb_put(nsb, len), len);
1657 
1658  nsb->protocol = lmc_proto_type(sc, nsb);
1659  skb_reset_mac_header(nsb);
1660  /* skb_reset_network_header(nsb); */
1661  nsb->dev = dev;
1662  lmc_proto_netif(sc, nsb);
1663  }
1664 
1665  skip_packet:
1666  LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1667  sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
1668 
1669  sc->lmc_next_rx++;
1670  i = sc->lmc_next_rx % LMC_RXDESCS;
1671  rx_work_limit--;
1672  if (rx_work_limit < 0)
1673  break;
1674  }
1675 
1676  /* detect condition for LMC1000 where DSU cable attaches and fills
1677  * descriptors with bogus packets
1678  *
1679  if (localLengthErrCnt > LMC_RXDESCS - 3) {
1680  sc->extra_stats.rx_BadPktSurgeCnt++;
1681  LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt,
1682  sc->extra_stats.rx_BadPktSurgeCnt);
1683  } */
1684 
1685  /* save max count of receive descriptors serviced */
1686  if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt)
1687  sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1688 
1689 #ifdef DEBUG
1690  if (rxIntLoopCnt == 0)
1691  {
1692  for (i = 0; i < LMC_RXDESCS; i++)
1693  {
1694  if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
1696  {
1697  rxIntLoopCnt++;
1698  }
1699  }
1700  LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
1701  }
1702 #endif
1703 
1704 
1706 
1707 skip_out_of_mem:
1708 
1709  lmc_trace(dev, "lmc_rx out");
1710 
1711  return 0;
1712 }
1713 
1714 static struct net_device_stats *lmc_get_stats(struct net_device *dev)
1715 {
1716  lmc_softc_t *sc = dev_to_sc(dev);
1717  unsigned long flags;
1718 
1719  lmc_trace(dev, "lmc_get_stats in");
1720 
1721  spin_lock_irqsave(&sc->lmc_lock, flags);
1722 
1723  sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
1724 
1725  spin_unlock_irqrestore(&sc->lmc_lock, flags);
1726 
1727  lmc_trace(dev, "lmc_get_stats out");
1728 
1729  return &sc->lmc_device->stats;
1730 }
1731 
/* PCI glue: the device-ID table and probe/remove hooks this driver
 * registers with the PCI core. */
static struct pci_driver lmc_driver = {
    .name = "lmc",
    .id_table = lmc_pci_tbl,
    .probe = lmc_init_one,
    .remove = __devexit_p(lmc_remove_one),
};

/* Generates module init/exit that (un)register lmc_driver. */
module_pci_driver(lmc_driver);
1740 
1741 unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1742 {
1743  int i;
1744  int command = (0xf6 << 10) | (devaddr << 5) | regno;
1745  int retval = 0;
1746 
1747  lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
1748 
1749  LMC_MII_SYNC (sc);
1750 
1751  lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
1752 
1753  for (i = 15; i >= 0; i--)
1754  {
1755  int dataval = (command & (1 << i)) ? 0x20000 : 0;
1756 
1757  LMC_CSR_WRITE (sc, csr_9, dataval);
1758  lmc_delay ();
1759  /* __SLOW_DOWN_IO; */
1760  LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1761  lmc_delay ();
1762  /* __SLOW_DOWN_IO; */
1763  }
1764 
1765  lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
1766 
1767  for (i = 19; i > 0; i--)
1768  {
1769  LMC_CSR_WRITE (sc, csr_9, 0x40000);
1770  lmc_delay ();
1771  /* __SLOW_DOWN_IO; */
1772  retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1773  LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1774  lmc_delay ();
1775  /* __SLOW_DOWN_IO; */
1776  }
1777 
1778  lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
1779 
1780  return (retval >> 1) & 0xffff;
1781 }
1782 
1783 void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1784 {
1785  int i = 32;
1786  int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
1787 
1788  lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
1789 
1790  LMC_MII_SYNC (sc);
1791 
1792  i = 31;
1793  while (i >= 0)
1794  {
1795  int datav;
1796 
1797  if (command & (1 << i))
1798  datav = 0x20000;
1799  else
1800  datav = 0x00000;
1801 
1802  LMC_CSR_WRITE (sc, csr_9, datav);
1803  lmc_delay ();
1804  /* __SLOW_DOWN_IO; */
1805  LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1806  lmc_delay ();
1807  /* __SLOW_DOWN_IO; */
1808  i--;
1809  }
1810 
1811  i = 2;
1812  while (i > 0)
1813  {
1814  LMC_CSR_WRITE (sc, csr_9, 0x40000);
1815  lmc_delay ();
1816  /* __SLOW_DOWN_IO; */
1817  LMC_CSR_WRITE (sc, csr_9, 0x50000);
1818  lmc_delay ();
1819  /* __SLOW_DOWN_IO; */
1820  i--;
1821  }
1822 
1823  lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
1824 }
1825 
/*
 * Rebuild both DMA descriptor rings from scratch and hand their base
 * addresses to the chip.  RX descriptors each get an skb (existing ones
 * are reused); any skb still sitting in the TX ring is dropped.  On RX
 * allocation failure we stop early, set sc->failed_ring, and only the
 * descriptors built so far are linked into a ring.
 */
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
    int i;

    lmc_trace(sc->lmc_device, "lmc_softreset in");

    /* Reset all ring bookkeeping before rebuilding. */
    sc->lmc_txfull = 0;
    sc->lmc_next_rx = 0;
    sc->lmc_next_tx = 0;
    sc->lmc_taint_rx = 0;
    sc->lmc_taint_tx = 0;

    /*
     * Setup each one of the receiver buffers
     * allocate an skbuff for each one, setup the descriptor table
     * and point each buffer at the next one
     */

    for (i = 0; i < LMC_RXDESCS; i++)
    {
        struct sk_buff *skb;

        if (sc->lmc_rxq[i] == NULL)
        {
            skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
            if(skb == NULL){
                /* give up; the watchdog will retry the ring later */
                printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
                sc->failed_ring = 1;
                break;
            }
            else{
                sc->lmc_rxq[i] = skb;
            }
        }
        else
        {
            /* reuse the skb already parked in this slot */
            skb = sc->lmc_rxq[i];
        }

        skb->dev = sc->lmc_device;

        /* owned by 21140 */
        sc->lmc_rxring[i].status = 0x80000000;

        /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
        sc->lmc_rxring[i].length = skb_tailroom(skb);

        /* use to be tail which is dumb since you're thinking why write
         * to the end of the packj,et but since there's nothing there tail == data
         */
        sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);

        /* This is fair since the structure is static and we have the next address
         * (at i == LMC_RXDESCS-1 this points one past; fixed up below) */
        sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);

    }

    /*
     * Sets end of ring (only if at least one descriptor was built)
     */
    if (i != 0) {
        sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
        sc->lmc_rxring[i - 1].buffer2 = virt_to_bus(&sc->lmc_rxring[0]); /* Point back to the start */
    }
    LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */

    /* Initialize the transmit rings and buffers */
    for (i = 0; i < LMC_TXDESCS; i++)
    {
        if (sc->lmc_txq[i] != NULL){            /* have buffer */
            dev_kfree_skb(sc->lmc_txq[i]);      /* free it */
            sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */
        }
        sc->lmc_txq[i] = NULL;
        sc->lmc_txring[i].status = 0x00000000;
        sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
    }
    /* close the TX ring back to descriptor 0 */
    sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
    LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));

    lmc_trace(sc->lmc_device, "lmc_softreset out");
}
1909 
1910 void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1911 {
1912  lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1913  sc->lmc_gpio_io &= ~bits;
1915  lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1916 }
1917 
1918 void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
1919 {
1920  lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1921  sc->lmc_gpio_io |= bits;
1923  lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1924 }
1925 
/*
 * Light the LED(s) selected by `led`.  LED bits in MII register 16 are
 * active-low: a cleared bit means the LED is lit, so we clear the bits
 * and write the register back.
 */
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_on in");
    if((~sc->lmc_miireg16) & led){ /* Already on! */
        lmc_trace(sc->lmc_device, "lmc_led_on aon out");
        return;
    }

    sc->lmc_miireg16 &= ~led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_on out");
}
1938 
/*
 * Extinguish the LED(s) selected by `led`.  LED bits in MII register 16
 * are active-low, so setting the bits turns the LEDs off.
 */
void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
    lmc_trace(sc->lmc_device, "lmc_led_off in");
    if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
        lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
        return;
    }

    sc->lmc_miireg16 |= led;
    lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
    lmc_trace(sc->lmc_device, "lmc_led_off out");
}
1951 
1952 static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
1953 {
1954  lmc_trace(sc->lmc_device, "lmc_reset in");
1956  lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1957 
1959  lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
1960 
1961  /*
1962  * make some of the GPIO pins be outputs
1963  */
1965 
1966  /*
1967  * RESET low to force state reset. This also forces
1968  * the transmitter clock to be internal, but we expect to reset
1969  * that later anyway.
1970  */
1971  sc->lmc_gpio &= ~(LMC_GEP_RESET);
1972  LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
1973 
1974  /*
1975  * hold for more than 10 microseconds
1976  */
1977  udelay(50);
1978 
1979  /*
1980  * stop driving Xilinx-related signals
1981  */
1983 
1984  /*
1985  * Call media specific init routine
1986  */
1987  sc->lmc_media->init(sc);
1988 
1989  sc->extra_stats.resetCount++;
1990  lmc_trace(sc->lmc_device, "lmc_reset out");
1991 }
1992 
1993 static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
1994 {
1995  u32 val;
1996  lmc_trace(sc->lmc_device, "lmc_dec_reset in");
1997 
1998  /*
1999  * disable all interrupts
2000  */
2001  sc->lmc_intrmask = 0;
2002  LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
2003 
2004  /*
2005  * Reset the chip with a software reset command.
2006  * Wait 10 microseconds (actually 50 PCI cycles but at
2007  * 33MHz that comes to two microseconds but wait a
2008  * bit longer anyways)
2009  */
2010  LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
2011  udelay(25);
2012 #ifdef __sparc__
2013  sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
2014  sc->lmc_busmode = 0x00100000;
2016  LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
2017 #endif
2018  sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
2019 
2020  /*
2021  * We want:
2022  * no ethernet address in frames we write
2023  * disable padding (txdesc, padding disable)
2024  * ignore runt frames (rdes0 bit 15)
2025  * no receiver watchdog or transmitter jabber timer
2026  * (csr15 bit 0,14 == 1)
2027  * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
2028  */
2029 
2037  );
2042  );
2043 
2044  LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
2045 
2046  /*
2047  * disable receiver watchdog and transmit jabber
2048  */
2049  val = LMC_CSR_READ(sc, csr_sia_general);
2051  LMC_CSR_WRITE(sc, csr_sia_general, val);
2052 
2053  lmc_trace(sc->lmc_device, "lmc_dec_reset out");
2054 }
2055 
/*
 * Fill in the per-card CSR address table.  The Tulip's sixteen CSRs sit
 * at evenly spaced offsets from csr_base; csr_size is the per-register
 * stride (it differs between I/O- and memory-mapped access).
 */
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
                         size_t csr_size)
{
    lmc_trace(sc->lmc_device, "lmc_initcsrs in");
    sc->lmc_csrs.csr_busmode       = csr_base + 0 * csr_size;
    sc->lmc_csrs.csr_txpoll        = csr_base + 1 * csr_size;
    sc->lmc_csrs.csr_rxpoll        = csr_base + 2 * csr_size;
    sc->lmc_csrs.csr_rxlist        = csr_base + 3 * csr_size;
    sc->lmc_csrs.csr_txlist        = csr_base + 4 * csr_size;
    sc->lmc_csrs.csr_status        = csr_base + 5 * csr_size;
    sc->lmc_csrs.csr_command       = csr_base + 6 * csr_size;
    sc->lmc_csrs.csr_intr          = csr_base + 7 * csr_size;
    sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
    sc->lmc_csrs.csr_9             = csr_base + 9 * csr_size;
    sc->lmc_csrs.csr_10            = csr_base + 10 * csr_size;
    sc->lmc_csrs.csr_11            = csr_base + 11 * csr_size;
    sc->lmc_csrs.csr_12            = csr_base + 12 * csr_size;
    sc->lmc_csrs.csr_13            = csr_base + 13 * csr_size;
    sc->lmc_csrs.csr_14            = csr_base + 14 * csr_size;
    sc->lmc_csrs.csr_15            = csr_base + 15 * csr_size;
    lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
2078 
2079 static void lmc_driver_timeout(struct net_device *dev)
2080 {
2081  lmc_softc_t *sc = dev_to_sc(dev);
2082  u32 csr6;
2083  unsigned long flags;
2084 
2085  lmc_trace(dev, "lmc_driver_timeout in");
2086 
2087  spin_lock_irqsave(&sc->lmc_lock, flags);
2088 
2089  printk("%s: Xmitter busy|\n", dev->name);
2090 
2091  sc->extra_stats.tx_tbusy_calls++;
2092  if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
2093  goto bug_out;
2094 
2095  /*
2096  * Chip seems to have locked up
2097  * Reset it
2098  * This whips out all our decriptor
2099  * table and starts from scartch
2100  */
2101 
2103  LMC_CSR_READ (sc, csr_status),
2104  sc->extra_stats.tx_ProcTimeout);
2105 
2106  lmc_running_reset (dev);
2107 
2108  LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
2110  lmc_mii_readreg (sc, 0, 16),
2111  lmc_mii_readreg (sc, 0, 17));
2112 
2113  /* restart the tx processes */
2114  csr6 = LMC_CSR_READ (sc, csr_command);
2115  LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
2116  LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
2117 
2118  /* immediate transmit */
2119  LMC_CSR_WRITE (sc, csr_txpoll, 0);
2120 
2121  sc->lmc_device->stats.tx_errors++;
2122  sc->extra_stats.tx_ProcTimeout++; /* -baz */
2123 
2124  dev->trans_start = jiffies; /* prevent tx timeout */
2125 
2126 bug_out:
2127 
2128  spin_unlock_irqrestore(&sc->lmc_lock, flags);
2129 
2130  lmc_trace(dev, "lmc_driver_timout out");
2131 
2132 
2133 }