/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency.  This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
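/*
 * Usage: DEB(DEB_INIT, printk(KERN_DEBUG "...")) executes the printk
 * only when the corresponding DEB_* bit is set in the i596_debug mask.
 */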


#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
#define ENABLE_MVME16x_NET
#endif
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
#define ENABLE_APRICOT
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)     ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x)     ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x)    ((struct i596_iscp *)((long)x))
#define WSWAPscb(x)     ((struct i596_scb *)((long)x))
#define WSWAPcmd(x)     ((struct i596_cmd *)((long)x))
#define WSWAPtbd(x)     ((struct i596_tbd *)((long)x))
#define WSWAPchar(x)    ((char *)((long)x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif
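/*
 * Worked example: on a 680x0 board, WSWAPcmd(0x12345678) evaluates to
 * (struct i596_cmd *) 0x56781234 -- the two 16-bit halves are swapped
 * to match the i596 'big-endian' layout described above.  On the
 * Apricot (Intel) build the macros are plain casts and swap nothing.
 */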

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");

module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
	unsigned short count;
	unsigned short zero1;
	struct i596_rbd *b_next;
	unsigned char *b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	struct i596_rbd *b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};

static char init_setup[] =
{
	0x8E,	/* length, prefetch on */
	0xC8,	/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,	/* don't save bad frames */
#else
	0x80,	/* don't save bad frames */
#endif
	0x2E,	/* No source address insertion, 8 byte preamble */
	0x00,	/* priority and backoff defaults */
	0x60,	/* interframe spacing */
	0x00,	/* slot time LSB */
	0xf2,	/* slot time and retries */
	0x00,	/* promiscuous mode */
	0x00,	/* collision detect */
	0x40,	/* minimum frame length */
	0xff,
	0x00,
	0x7f /*	*multi IA */ };
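/* 14 configuration bytes in total: the low bits of the first byte
 * (0x8E) encode that byte count, and init_i596_mem() copies exactly
 * 14 bytes of this array into the CmdConfigure command.
 */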

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;


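/* Assert Channel Attention, telling the 82596 to examine the SCB.
 * The mechanism is board specific: a dedicated register on MVME16x,
 * a read of the base address on BVME6000, and an ISA port write on
 * the Apricot.
 */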
static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT) {
		outw(0, (short) (dev->base_addr) + 4);
	}
#endif
}


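/* Issue one of the PORT_* commands above.  The 82596 latches a 32-bit
 * value whose low bits select the command and whose upper bits carry
 * an address; it is delivered as two 16-bit writes on MVME16x and as
 * a word-swapped 32-bit value written twice on BVME6000.
 */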
static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


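/* The wait_*() helpers below busy-wait, polling every 10us for up to
 * delcnt iterations, and log an error if the chip never responds.
 */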
static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		       dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
	       " over %lx, rcvdt %lx, short %lx\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
		       " count %04x\n",
		       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
		       rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
		rbd->skb = NULL;
	}
}

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

		if (skb == NULL) {
			remove_rx_bufs(dev);
			return -ENOMEM;
		}

		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	return 0;
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
	short ioaddr = dev->base_addr;
#endif
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#elif defined(ENABLE_APRICOT)

	{
		u32 scp = virt_to_bus(&lp->scp);

		/* change the scp address */
		outw(0, ioaddr);
		outw(0, ioaddr);
		outb(4, ioaddr + 0xf);
		outw(scp | 2, ioaddr);
		outw(scp >> 16, ioaddr);
	}
#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT)
		lp->scp.sysbus = 0x00440000;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

#if defined(ENABLE_APRICOT)
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

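/* Receive path.  Walk the list of completed frame descriptors; frames
 * longer than rx_copybreak are passed up in their ring skb (a freshly
 * allocated skb replaces it on the ring), shorter ones are copied into
 * a small new skb so the large ring buffer can be reused.
 */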
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;	/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			}
			else
				skb = netdev_alloc_skb(dev, pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				dev->stats.rx_dropped++;
			}
			else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->protocol=eth_type_trans(skb,dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
		    {
			struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
			struct sk_buff *skb = tx_cmd->skb;

			dev_kfree_skb(skb);

			dev->stats.tx_errors++;
			dev->stats.tx_aborted_errors++;

			ptr->v_next = ptr->b_next = I596_NULL;
			tx_cmd->cmd.command = 0;  /* Mark as free */
			break;
		    }
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

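/* Append a command to the chip's command chain.  If the chain was
 * empty, point the SCB at the new command and start the command unit.
 * A backlog above max_cmd_backlog that hasn't drained within
 * ticks_limit jiffies is treated as a wedged chip and forces a reset.
 */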
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
			res = -EAGAIN;
			goto err_irq_dev;
		}
	}
#endif
	res = init_rx_bufs(dev);
	if (res)
		goto err_irq_56;

	netif_start_queue(dev);

	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		goto err_queue;
	}

	return 0;

err_queue:
	netif_stop_queue(dev);
	remove_rx_bufs(dev);
err_irq_56:
#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
err_irq_dev:
#endif
	free_irq(dev->irq, dev);

	return res;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}

static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
		       dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

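/* ISA probe defaults for the Apricot build: the probe below only ever
 * looks at io 0x300, and irq can be overridden as a module parameter.
 */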
static int io = 0x300;
static int irq = 10;

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

struct net_device * __init i82596_probe(int unit)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->base_addr = io;
		dev->irq = irq;
	}

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_APRICOT
	{
		int checksum = 0;
		int ioaddr = 0x300;

		/* this is easy, the ethernet interface can only be at 0x300 */
		/* first check nothing is already registered here */

		if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
			printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
			err = -EBUSY;
			goto out;
		}

		dev->base_addr = ioaddr;

		for (i = 0; i < 8; i++) {
			eth_addr[i] = inb(ioaddr + 8 + i);
			checksum += eth_addr[i];
		}

		/* checksum is a multiple of 0x100; got this wrong first time,
		   some machines have 0x100, some 0x200.  The DOS driver doesn't
		   even bother with the checksum.
		   Some other boards trip the checksum.. but then appear as
		   ether address 0.  Trap these - AC */

		if ((checksum % 0x100) ||
		    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
			err = -ENODEV;
			goto out1;
		}

		dev->irq = 10;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(version));

	/* The 82596-specific entries in the device structure. */
	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev->mem_start));
out1:
#ifdef ENABLE_APRICOT
	release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;
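	/* The four bits kept here are the i82596 SCB interrupt flags:
	 * CX (0x8000, command done), FR (0x4000, frame received),
	 * CNA (0x2000, command unit left active) and RNR (0x1000,
	 * receive unit left ready).  Writing them back to scb.command
	 * below acknowledges them.
	 */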

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
#ifdef ENABLE_APRICOT
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
#endif
	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *	Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, netdev_mc_count(dev),
		dev->flags & IFF_PROMISC ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
		return;

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		lp->cf_cmd.cmd.command = CmdConfigure;
		i596_add_cmd(dev, &lp->cf_cmd.cmd);
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT)
	{
		cnt = MAX_MC_CNT;
		printk(KERN_ERR "%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
			return;
		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = cnt * ETH_ALEN;
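		/* mc_cnt is a byte count, not an address count -- hence
		 * the multiply by ETH_ALEN.
		 */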
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
						dev->name, cp));
			cp += ETH_ALEN;
		}
		i596_add_cmd(dev, &cmd->cmd);
	}
}

#ifdef MODULE
static struct net_device *dev_82596;

#ifdef ENABLE_APRICOT
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "Apricot IRQ number");
#endif

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

int __init init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe(-1);
	if (IS_ERR(dev_82596))
		return PTR_ERR(dev_82596);
	return 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev_82596->mem_start));
#ifdef ENABLE_APRICOT
	/* If we don't do this, we can't re-insmod it later. */
	release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
#endif
	free_netdev(dev_82596);
}

#endif				/* MODULE */