lib82596.c
1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2  munged into HPPA boxen.
3 
4  This driver is based upon 82596.c, original credits are below...
5  but there were too many hoops which HP wants jumped through to
6  keep this code in there in a sane manner.
7 
8  3 primary sources of the mess --
9  1) hppa needs *lots* of cacheline flushing to keep this kind of
10  MMIO running.
11 
12  2) The 82596 needs to see all of its pointers as their physical
13  address. Thus virt_to_bus/bus_to_virt are *everywhere*.
14 
15  3) The implementation HP is using seems to be significantly pickier
16  about when and how the command and RX units are started. Some
17  command ordering was changed.
18 
19  Examination of the mach driver leads one to believe that there
20  might be a saner way to pull this off... anyone who feels like a
21  full rewrite can be my guest.
22 
23  Split 02/13/2000 Sam Creasey ([email protected])
24 
25  02/01/2000 Initial modifications for parisc by Helge Deller ([email protected])
26  03/02/2000 changes for better/correct(?) cache-flushing (deller)
27 */
28 
29 /* 82596.c: A generic 82596 ethernet driver for linux. */
30 /*
31  Based on Apricot.c
32  Written 1994 by Mark Evans.
33  This driver is for the Apricot 82596 bus-master interface
34 
35  Modularised 12/94 Mark Evans
36 
37 
38  Modified to support the 82596 ethernet chips on 680x0 VME boards.
39  by Richard Hirst <[email protected]>
40  Renamed to be 82596.c
41 
42  980825: Changed to receive directly into sk_buffs which are
43  allocated at open() time. Eliminates copy on incoming frames
44  (small ones are still copied). Shared data now held in a
45  non-cached page, so we can run on 68060 in copyback mode.
46 
47  TBD:
48  * look at deferring rx frames rather than discarding (as per tulip)
49  * handle tx ring full as per tulip
50  * performance test to tune rx_copybreak
51 
52  Most of my modifications relate to the braindead big-endian
53  implementation by Intel. When the i596 is operating in
54  'big-endian' mode, it thinks a 32 bit value of 0x12345678
55  should be stored as 0x56781234. This is a real pain, when
56  you have linked lists which are shared by the 680x0 and the
57  i596.
58 
59  Driver skeleton
60  Written 1993 by Donald Becker.
61  Copyright 1993 United States Government as represented by the Director,
62  National Security Agency. This software may only be used and distributed
63  according to the terms of the GNU General Public License as modified by SRC,
64  incorporated herein by reference.
65 
66  The author may be reached as [email protected], or C/O
67  Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
68 
69  */
70 
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/errno.h>
75 #include <linux/ioport.h>
76 #include <linux/interrupt.h>
77 #include <linux/delay.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/skbuff.h>
81 #include <linux/init.h>
82 #include <linux/types.h>
83 #include <linux/bitops.h>
84 #include <linux/dma-mapping.h>
85 #include <linux/io.h>
86 #include <linux/irq.h>
87 #include <linux/gfp.h>
88 
89 /* DEBUG flags
90  */
91 
92 #define DEB_INIT 0x0001
93 #define DEB_PROBE 0x0002
94 #define DEB_SERIOUS 0x0004
95 #define DEB_ERRORS 0x0008
96 #define DEB_MULTI 0x0010
97 #define DEB_TDR 0x0020
98 #define DEB_OPEN 0x0040
99 #define DEB_RESET 0x0080
100 #define DEB_ADDCMD 0x0100
101 #define DEB_STATUS 0x0200
102 #define DEB_STARTTX 0x0400
103 #define DEB_RXADDR 0x0800
104 #define DEB_TXADDR 0x1000
105 #define DEB_RXFRAME 0x2000
106 #define DEB_INTS 0x4000
107 #define DEB_STRUCT 0x8000
108 #define DEB_ANY 0xffff
109 
110 
111 #define DEB(x, y) if (i596_debug & (x)) { y; }
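/* DEB(flag, stmt) executes stmt only when the matching DEB_* bit is set in
 * i596_debug, e.g. DEB(DEB_INIT, printk(KERN_DEBUG "%s: ...\n", dev->name)). */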
112 
113 
114 /*
115  * The MPU_PORT command allows direct access to the 82596. With PORT access
116  * the following commands are available (p5-18). The 32-bit port command
117  * must be word-swapped with the most significant word written first.
118  * This only applies to VME boards.
119  */
120 #define PORT_RESET 0x00 /* reset 82596 */
121 #define PORT_SELFTEST 0x01 /* selftest */
122 #define PORT_ALTSCP 0x02 /* alternate SCB address */
123 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
124 
125 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
126 
127 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
128  * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
129  */
130 static int rx_copybreak = 100;
131 
132 #define PKT_BUF_SZ 1536
133 #define MAX_MC_CNT 64
134 
135 #define ISCP_BUSY 0x0001
136 
137 #define I596_NULL ((u32)0xffffffff)
138 
139 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
140 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
141 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
142 
143 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
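/* The CMD_* bits above are OR-ed into the 16-bit command word of an action
 * command or receive frame descriptor, e.g. rfd->cmd = SWAP16(CMD_EOL | CMD_FLEX). */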
144 
145 enum commands {
146  CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
147  CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
148 };
149 
150 #define STAT_C 0x8000 /* Set to 0 after execution */
151 #define STAT_B 0x4000 /* Command being executed */
152 #define STAT_OK 0x2000 /* Command executed ok */
153 #define STAT_A 0x1000 /* Command aborted */
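/* The STAT_* bits are returned in the status word once the 82596 has acted
 * on a command or frame, e.g. the completion test (status & SWAP16(STAT_C))
 * used in i596_interrupt() and i596_rx(). */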
154 
155 #define CUC_START 0x0100
156 #define CUC_RESUME 0x0200
157 #define CUC_SUSPEND 0x0300
158 #define CUC_ABORT 0x0400
159 #define RX_START 0x0010
160 #define RX_RESUME 0x0020
161 #define RX_SUSPEND 0x0030
162 #define RX_ABORT 0x0040
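/* CUC_* and RX_* opcodes are written into scb.command to start, resume,
 * suspend or abort the command and receive units, e.g.
 * dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT) in i596_reset(). */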
163 
164 #define TX_TIMEOUT (HZ/20)
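/* HZ/20 jiffies = 50 ms; installed below as dev->watchdog_timeo. */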
165 
166 
167 struct i596_reg {
168  unsigned short porthi;
169  unsigned short portlo;
170  u32 ca;
171 };
172 
173 #define EOF 0x8000
174 #define SIZE_MASK 0x3fff
175 
176 struct i596_tbd {
177  unsigned short size;
178  unsigned short pad;
179  u32 next;
180  u32 data;
181  u32 cache_pad[5]; /* Total 32 bytes... */
182 };
183 
184 /* The command structure has two 'next' pointers; v_next is the address of
185  * the next command as seen by the CPU, b_next is the address of the next
186  * command as seen by the 82596. The b_next pointer, as used by the 82596
187  * always references the status field of the next command, rather than the
188  * v_next field, because the 82596 is unaware of v_next. It may seem more
189  * logical to put v_next at the end of the structure, but we cannot do that
190  * because the 82596 expects other fields to be there, depending on command
191  * type.
192  */
193 
194 struct i596_cmd {
195  struct i596_cmd *v_next; /* Address from CPUs viewpoint */
196  unsigned short status;
197  unsigned short command;
198  u32 b_next; /* Address from i596 viewpoint */
199 };
200 
201 struct tx_cmd {
202  struct i596_cmd cmd;
203  u32 tbd;
204  unsigned short size;
205  unsigned short pad;
206  struct sk_buff *skb; /* So we can free it after tx */
207  dma_addr_t dma_addr;
208 #ifdef __LP64__
209  u32 cache_pad[6]; /* Total 64 bytes... */
210 #else
211  u32 cache_pad[1]; /* Total 32 bytes... */
212 #endif
213 };
214 
215 struct tdr_cmd {
216  struct i596_cmd cmd;
217  unsigned short status;
218  unsigned short pad;
219 };
220 
221 struct mc_cmd {
222  struct i596_cmd cmd;
223  short mc_cnt;
224  char mc_addrs[MAX_MC_CNT*6];
225 };
226 
227 struct sa_cmd {
228  struct i596_cmd cmd;
229  char eth_addr[8];
230 };
231 
232 struct cf_cmd {
233  struct i596_cmd cmd;
234  char i596_config[16];
235 };
236 
237 struct i596_rfd {
238  unsigned short stat;
239  unsigned short cmd;
240  u32 b_next; /* Address from i596 viewpoint */
241  u32 rbd;
242  unsigned short count;
243  unsigned short size;
244  struct i596_rfd *v_next; /* Address from CPUs viewpoint */
245  struct i596_rfd *v_prev;
246 #ifndef __LP64__
247  u32 cache_pad[2]; /* Total 32 bytes... */
248 #endif
249 };
250 
251 struct i596_rbd {
252  /* hardware data */
253  unsigned short count;
254  unsigned short zero1;
255  u32 b_next;
256  u32 b_data; /* Address from i596 viewpoint */
257  unsigned short size;
258  unsigned short zero2;
259  /* driver data */
260  struct sk_buff *skb;
261  struct i596_rbd *v_next;
262  u32 b_addr; /* This rbd addr from i596 view */
263  unsigned char *v_data; /* Address from CPUs viewpoint */
264  /* Total 32 bytes... */
265 #ifdef __LP64__
266  u32 cache_pad[4];
267 #endif
268 };
269 
270 /* These values are chosen so that struct i596_dma fits in one page... */
271 
272 #define TX_RING_SIZE 32
273 #define RX_RING_SIZE 16
274 
275 struct i596_scb {
276  unsigned short status;
277  unsigned short command;
278  u32 cmd;
279  u32 rfd;
280  u32 crc_err;
281  u32 align_err;
282  u32 resource_err;
283  u32 over_err;
284  u32 rcvdt_err;
285  u32 short_err;
286  unsigned short t_on;
287  unsigned short t_off;
288 };
289 
290 struct i596_iscp {
291  u32 stat;
292  u32 scb;
293 };
294 
295 struct i596_scp {
296  u32 sysbus;
297  u32 pad;
298  u32 iscp;
299 };
300 
301 struct i596_dma {
302  struct i596_scp scp __attribute__((aligned(32)));
303  volatile struct i596_iscp iscp __attribute__((aligned(32)));
304  volatile struct i596_scb scb __attribute__((aligned(32)));
305  struct sa_cmd sa_cmd __attribute__((aligned(32)));
306  struct cf_cmd cf_cmd __attribute__((aligned(32)));
307  struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
308  struct mc_cmd mc_cmd __attribute__((aligned(32)));
309  struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
310  struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
311  struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
312  struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
313 };
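/* Each member of i596_dma is 32-byte aligned so that the individual
 * descriptors can be written back or invalidated from the cache on their
 * own (see the DMA_WBACK/DMA_INV calls below); the whole structure is
 * allocated as a single block with DMA_ALLOC in i82596_probe(). */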
314 
315 struct i596_private {
316  struct i596_dma *dma;
317  u32 stat;
318  int last_restart;
319  struct i596_rfd *rfd_head;
320  struct i596_rbd *rbd_head;
321  struct i596_cmd *cmd_tail;
322  struct i596_cmd *cmd_head;
323  int cmd_backlog;
324  unsigned long last_cmd;
325  int next_tx_cmd;
326  int options;
327  spinlock_t lock; /* serialize access to chip */
328  dma_addr_t dma_addr;
329  void __iomem *mpu_port;
330  void __iomem *ca;
331 };
332 
333 static const char init_setup[] =
334 {
335  0x8E, /* length, prefetch on */
336  0xC8, /* fifo to 8, monitor off */
337  0x80, /* don't save bad frames */
338  0x2E, /* No source address insertion, 8 byte preamble */
339  0x00, /* priority and backoff defaults */
340  0x60, /* interframe spacing */
341  0x00, /* slot time LSB */
342  0xf2, /* slot time and retries */
343  0x00, /* promiscuous mode */
344  0x00, /* collision detect */
345  0x40, /* minimum frame length */
346  0xff,
347  0x00,
348  0x7f /* *multi IA */ };
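/* init_setup is the 14-byte parameter block for CmdConfigure; it is copied
 * into dma->cf_cmd.i596_config by init_i596_mem() before the configure
 * command is queued to the chip. */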
349 
350 static int i596_open(struct net_device *dev);
351 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
352 static irqreturn_t i596_interrupt(int irq, void *dev_id);
353 static int i596_close(struct net_device *dev);
354 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
355 static void i596_tx_timeout (struct net_device *dev);
356 static void print_eth(unsigned char *buf, char *str);
357 static void set_multicast_list(struct net_device *dev);
358 static inline void ca(struct net_device *dev);
359 static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
360 
361 static int rx_ring_size = RX_RING_SIZE;
362 static int ticks_limit = 100;
363 static int max_cmd_backlog = TX_RING_SIZE-1;
364 
365 #ifdef CONFIG_NET_POLL_CONTROLLER
366 static void i596_poll_controller(struct net_device *dev);
367 #endif
368 
369 
370 static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
371 {
372  DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
373  while (--delcnt && dma->iscp.stat) {
374  udelay(10);
375  DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
376  }
377  if (!delcnt) {
378  printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
379  dev->name, str, SWAP16(dma->iscp.stat));
380  return -1;
381  } else
382  return 0;
383 }
384 
385 
386 static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
387 {
388  DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
389  while (--delcnt && dma->scb.command) {
390  udelay(10);
391  DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
392  }
393  if (!delcnt) {
394  printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
395  dev->name, str,
396  SWAP16(dma->scb.status),
397  SWAP16(dma->scb.command));
398  return -1;
399  } else
400  return 0;
401 }
402 
403 
404 static void i596_display_data(struct net_device *dev)
405 {
406  struct i596_private *lp = netdev_priv(dev);
407  struct i596_dma *dma = lp->dma;
408  struct i596_cmd *cmd;
409  struct i596_rfd *rfd;
410  struct i596_rbd *rbd;
411 
412  printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
413  &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
414  printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
415  &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
416  printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
417  " .cmd = %08x, .rfd = %08x\n",
418  &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
419  SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
420  printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
421  " over %x, rcvdt %x, short %x\n",
422  SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
423  SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
424  SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
425  cmd = lp->cmd_head;
426  while (cmd != NULL) {
428  "cmd at %p, .status = %04x, .command = %04x,"
429  " .b_next = %08x\n",
430  cmd, SWAP16(cmd->status), SWAP16(cmd->command),
431  SWAP32(cmd->b_next));
432  cmd = cmd->v_next;
433  }
434  rfd = lp->rfd_head;
435  printk(KERN_DEBUG "rfd_head = %p\n", rfd);
436  do {
438  " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
439  " count %04x\n",
440  rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
441  SWAP32(rfd->b_next), SWAP32(rfd->rbd),
442  SWAP16(rfd->count));
443  rfd = rfd->v_next;
444  } while (rfd != lp->rfd_head);
445  rbd = lp->rbd_head;
446  printk(KERN_DEBUG "rbd_head = %p\n", rbd);
447  do {
449  " %p .count %04x, b_next %08x, b_data %08x,"
450  " size %04x\n",
451  rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
452  SWAP32(rbd->b_data), SWAP16(rbd->size));
453  rbd = rbd->v_next;
454  } while (rbd != lp->rbd_head);
455  DMA_INV(dev, dma, sizeof(struct i596_dma));
456 }
457 
458 
459 #define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
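/* virt_to_dma() turns a CPU pointer that lies inside the i596_dma block into
 * the bus address the chip must be given: lp->dma_addr is the bus address of
 * the block, and the pointer's offset within lp->dma is added to it, e.g.
 * dma->iscp.scb = SWAP32(virt_to_dma(lp, &dma->scb)). */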
460 
461 static inline int init_rx_bufs(struct net_device *dev)
462 {
463  struct i596_private *lp = netdev_priv(dev);
464  struct i596_dma *dma = lp->dma;
465  int i;
466  struct i596_rfd *rfd;
467  struct i596_rbd *rbd;
468 
469  /* First build the Receive Buffer Descriptor List */
470 
471  for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
472  dma_addr_t dma_addr;
473  struct sk_buff *skb;
474 
475  skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
476  if (skb == NULL)
477  return -1;
478  dma_addr = dma_map_single(dev->dev.parent, skb->data,
479  PKT_BUF_SZ, DMA_FROM_DEVICE);
480  rbd->v_next = rbd+1;
481  rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
482  rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
483  rbd->skb = skb;
484  rbd->v_data = skb->data;
485  rbd->b_data = SWAP32(dma_addr);
486  rbd->size = SWAP16(PKT_BUF_SZ);
487  }
488  lp->rbd_head = dma->rbds;
489  rbd = dma->rbds + rx_ring_size - 1;
490  rbd->v_next = dma->rbds;
491  rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
492 
493  /* Now build the Receive Frame Descriptor List */
494 
495  for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
496  rfd->rbd = I596_NULL;
497  rfd->v_next = rfd+1;
498  rfd->v_prev = rfd-1;
499  rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
500  rfd->cmd = SWAP16(CMD_FLEX);
501  }
502  lp->rfd_head = dma->rfds;
503  dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
504  rfd = dma->rfds;
505  rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
506  rfd->v_prev = dma->rfds + rx_ring_size - 1;
507  rfd = dma->rfds + rx_ring_size - 1;
508  rfd->v_next = dma->rfds;
509  rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
510  rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
511 
512  DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
513  return 0;
514 }
515 
516 static inline void remove_rx_bufs(struct net_device *dev)
517 {
518  struct i596_private *lp = netdev_priv(dev);
519  struct i596_rbd *rbd;
520  int i;
521 
522  for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
523  if (rbd->skb == NULL)
524  break;
525  dma_unmap_single(dev->dev.parent,
526  (dma_addr_t)SWAP32(rbd->b_data),
527  PKT_BUF_SZ, DMA_FROM_DEVICE);
528  dev_kfree_skb(rbd->skb);
529  }
530 }
531 
532 
533 static void rebuild_rx_bufs(struct net_device *dev)
534 {
535  struct i596_private *lp = netdev_priv(dev);
536  struct i596_dma *dma = lp->dma;
537  int i;
538 
539  /* Ensure rx frame/buffer descriptors are tidy */
540 
541  for (i = 0; i < rx_ring_size; i++) {
542  dma->rfds[i].rbd = I596_NULL;
543  dma->rfds[i].cmd = SWAP16(CMD_FLEX);
544  }
545  dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
546  lp->rfd_head = dma->rfds;
547  dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
548  lp->rbd_head = dma->rbds;
549  dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
550 
551  DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
552 }
553 
554 
555 static int init_i596_mem(struct net_device *dev)
556 {
557  struct i596_private *lp = netdev_priv(dev);
558  struct i596_dma *dma = lp->dma;
559  unsigned long flags;
560 
561  mpu_port(dev, PORT_RESET, 0);
562  udelay(100); /* Wait 100us - seems to help */
563 
564  /* change the scp address */
565 
566  lp->last_cmd = jiffies;
567 
568  dma->scp.sysbus = SYSBUS;
569  dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
570  dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
571  dma->iscp.stat = SWAP32(ISCP_BUSY);
572  lp->cmd_backlog = 0;
573 
574  lp->cmd_head = NULL;
575  dma->scb.cmd = I596_NULL;
576 
577  DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
578 
579  DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
580  DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
581  DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
582 
583  mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
584  ca(dev);
585  if (wait_istat(dev, dma, 1000, "initialization timed out"))
586  goto failed;
588  "%s: i82596 initialization successful\n",
589  dev->name));
590 
591  if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
592  printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
593  goto failed;
594  }
595 
596  /* Ensure rx frame/buffer descriptors are tidy */
597  rebuild_rx_bufs(dev);
598 
599  dma->scb.command = 0;
600  DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
601 
603  "%s: queuing CmdConfigure\n", dev->name));
604  memcpy(dma->cf_cmd.i596_config, init_setup, 14);
605  dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
606  DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
607  i596_add_cmd(dev, &dma->cf_cmd.cmd);
608 
609  DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
610  memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
611  dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
612  DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
613  i596_add_cmd(dev, &dma->sa_cmd.cmd);
614 
615  DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
616  dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
617  DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
618  i596_add_cmd(dev, &dma->tdr_cmd.cmd);
619 
620  spin_lock_irqsave (&lp->lock, flags);
621 
622  if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
623  spin_unlock_irqrestore (&lp->lock, flags);
624  goto failed_free_irq;
625  }
626  DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
627  dma->scb.command = SWAP16(RX_START);
628  dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
629  DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
630 
631  ca(dev);
632 
633  spin_unlock_irqrestore (&lp->lock, flags);
634  if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
635  goto failed_free_irq;
637  "%s: Receive unit started OK\n", dev->name));
638  return 0;
639 
640 failed_free_irq:
641  free_irq(dev->irq, dev);
642 failed:
643  printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
644  mpu_port(dev, PORT_RESET, 0);
645  return -1;
646 }
647 
648 
649 static inline int i596_rx(struct net_device *dev)
650 {
651  struct i596_private *lp = netdev_priv(dev);
652  struct i596_rfd *rfd;
653  struct i596_rbd *rbd;
654  int frames = 0;
655 
657  "i596_rx(), rfd_head %p, rbd_head %p\n",
658  lp->rfd_head, lp->rbd_head));
659 
660 
661  rfd = lp->rfd_head; /* Ref next frame to check */
662 
663  DMA_INV(dev, rfd, sizeof(struct i596_rfd));
664  while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
665  if (rfd->rbd == I596_NULL)
666  rbd = NULL;
667  else if (rfd->rbd == lp->rbd_head->b_addr) {
668  rbd = lp->rbd_head;
669  DMA_INV(dev, rbd, sizeof(struct i596_rbd));
670  } else {
671  printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
672  /* XXX Now what? */
673  rbd = NULL;
674  }
676  " rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
677  rfd, rfd->rbd, rfd->stat));
678 
679  if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
680  /* a good frame */
681  int pkt_len = SWAP16(rbd->count) & 0x3fff;
682  struct sk_buff *skb = rbd->skb;
683  int rx_in_place = 0;
684 
685  DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
686  frames++;
687 
688  /* Check if the packet is long enough to just accept
689  * without copying to a properly sized skbuff.
690  */
691 
692  if (pkt_len > rx_copybreak) {
693  struct sk_buff *newskb;
694  dma_addr_t dma_addr;
695 
696  dma_unmap_single(dev->dev.parent,
697  (dma_addr_t)SWAP32(rbd->b_data),
698  PKT_BUF_SZ, DMA_FROM_DEVICE);
699  /* Get fresh skbuff to replace filled one. */
700  newskb = netdev_alloc_skb_ip_align(dev,
701  PKT_BUF_SZ);
702  if (newskb == NULL) {
703  skb = NULL; /* drop pkt */
704  goto memory_squeeze;
705  }
706 
707  /* Pass up the skb already on the Rx ring. */
708  skb_put(skb, pkt_len);
709  rx_in_place = 1;
710  rbd->skb = newskb;
711  dma_addr = dma_map_single(dev->dev.parent,
712  newskb->data,
713  PKT_BUF_SZ,
714  DMA_FROM_DEVICE);
715  rbd->v_data = newskb->data;
716  rbd->b_data = SWAP32(dma_addr);
717  DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
718  } else
719  skb = netdev_alloc_skb_ip_align(dev, pkt_len);
720 memory_squeeze:
721  if (skb == NULL) {
722  /* XXX tulip.c can defer packets here!! */
724  "%s: i596_rx Memory squeeze, dropping packet.\n",
725  dev->name);
726  dev->stats.rx_dropped++;
727  } else {
728  if (!rx_in_place) {
729  /* 16 byte align the data fields */
730  dma_sync_single_for_cpu(dev->dev.parent,
731  (dma_addr_t)SWAP32(rbd->b_data),
732  PKT_BUF_SZ, DMA_FROM_DEVICE);
733  memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
734  dma_sync_single_for_device(dev->dev.parent,
735  (dma_addr_t)SWAP32(rbd->b_data),
736  PKT_BUF_SZ, DMA_FROM_DEVICE);
737  }
738  skb->len = pkt_len;
739  skb->protocol = eth_type_trans(skb, dev);
740  netif_rx(skb);
741  dev->stats.rx_packets++;
742  dev->stats.rx_bytes += pkt_len;
743  }
744  } else {
746  "%s: Error, rfd.stat = 0x%04x\n",
747  dev->name, rfd->stat));
748  dev->stats.rx_errors++;
749  if (rfd->stat & SWAP16(0x0100))
750  dev->stats.collisions++;
751  if (rfd->stat & SWAP16(0x8000))
752  dev->stats.rx_length_errors++;
753  if (rfd->stat & SWAP16(0x0001))
754  dev->stats.rx_over_errors++;
755  if (rfd->stat & SWAP16(0x0002))
756  dev->stats.rx_fifo_errors++;
757  if (rfd->stat & SWAP16(0x0004))
758  dev->stats.rx_frame_errors++;
759  if (rfd->stat & SWAP16(0x0008))
760  dev->stats.rx_crc_errors++;
761  if (rfd->stat & SWAP16(0x0010))
762  dev->stats.rx_length_errors++;
763  }
764 
765  /* Clear the buffer descriptor count and EOF + F flags */
766 
767  if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
768  rbd->count = 0;
769  lp->rbd_head = rbd->v_next;
770  DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
771  }
772 
773  /* Tidy the frame descriptor, marking it as end of list */
774 
775  rfd->rbd = I596_NULL;
776  rfd->stat = 0;
777  rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
778  rfd->count = 0;
779 
780  /* Update record of next frame descriptor to process */
781 
782  lp->dma->scb.rfd = rfd->b_next;
783  lp->rfd_head = rfd->v_next;
784  DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
785 
786  /* Remove end-of-list from old end descriptor */
787 
788  rfd->v_prev->cmd = SWAP16(CMD_FLEX);
789  DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
790  rfd = lp->rfd_head;
791  DMA_INV(dev, rfd, sizeof(struct i596_rfd));
792  }
793 
794  DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
795 
796  return 0;
797 }
798 
799 
800 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
801 {
802  struct i596_cmd *ptr;
803 
804  while (lp->cmd_head != NULL) {
805  ptr = lp->cmd_head;
806  lp->cmd_head = ptr->v_next;
807  lp->cmd_backlog--;
808 
809  switch (SWAP16(ptr->command) & 0x7) {
810  case CmdTx:
811  {
812  struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
813  struct sk_buff *skb = tx_cmd->skb;
814  dma_unmap_single(dev->dev.parent,
815  tx_cmd->dma_addr,
816  skb->len, DMA_TO_DEVICE);
817 
818  dev_kfree_skb(skb);
819 
820  dev->stats.tx_errors++;
821  dev->stats.tx_aborted_errors++;
822 
823  ptr->v_next = NULL;
824  ptr->b_next = I596_NULL;
825  tx_cmd->cmd.command = 0; /* Mark as free */
826  break;
827  }
828  default:
829  ptr->v_next = NULL;
830  ptr->b_next = I596_NULL;
831  }
832  DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
833  }
834 
835  wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
836  lp->dma->scb.cmd = I596_NULL;
837  DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
838 }
839 
840 
841 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
842 {
843  unsigned long flags;
844 
845  DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
846 
847  spin_lock_irqsave (&lp->lock, flags);
848 
849  wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
850 
851  netif_stop_queue(dev);
852 
853  /* FIXME: this command might cause an lpmc */
854  lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
855  DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
856  ca(dev);
857 
858  /* wait for shutdown */
859  wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
860  spin_unlock_irqrestore (&lp->lock, flags);
861 
862  i596_cleanup_cmd(dev, lp);
863  i596_rx(dev);
864 
865  netif_start_queue(dev);
866  init_i596_mem(dev);
867 }
868 
869 
870 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
871 {
872  struct i596_private *lp = netdev_priv(dev);
873  struct i596_dma *dma = lp->dma;
874  unsigned long flags;
875 
876  DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
877  lp->cmd_head));
878 
879  cmd->status = 0;
880  cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
881  cmd->v_next = NULL;
882  cmd->b_next = I596_NULL;
883  DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
884 
885  spin_lock_irqsave (&lp->lock, flags);
886 
887  if (lp->cmd_head != NULL) {
888  lp->cmd_tail->v_next = cmd;
889  lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
890  DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
891  } else {
892  lp->cmd_head = cmd;
893  wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
894  dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
895  dma->scb.command = SWAP16(CUC_START);
896  DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
897  ca(dev);
898  }
899  lp->cmd_tail = cmd;
900  lp->cmd_backlog++;
901 
902  spin_unlock_irqrestore (&lp->lock, flags);
903 
904  if (lp->cmd_backlog > max_cmd_backlog) {
905  unsigned long tickssofar = jiffies - lp->last_cmd;
906 
907  if (tickssofar < ticks_limit)
908  return;
909 
911  "%s: command unit timed out, status resetting.\n",
912  dev->name);
913 #if 1
914  i596_reset(dev, lp);
915 #endif
916  }
917 }
918 
919 static int i596_open(struct net_device *dev)
920 {
922  "%s: i596_open() irq %d.\n", dev->name, dev->irq));
923 
924  if (init_rx_bufs(dev)) {
925  printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
926  return -EAGAIN;
927  }
928  if (init_i596_mem(dev)) {
929  printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
930  goto out_remove_rx_bufs;
931  }
932  netif_start_queue(dev);
933 
934  return 0;
935 
936 out_remove_rx_bufs:
937  remove_rx_bufs(dev);
938  return -EAGAIN;
939 }
940 
941 static void i596_tx_timeout (struct net_device *dev)
942 {
943  struct i596_private *lp = netdev_priv(dev);
944 
945  /* Transmitter timeout, serious problems. */
947  "%s: transmit timed out, status resetting.\n",
948  dev->name));
949 
950  dev->stats.tx_errors++;
951 
952  /* Try to restart the adaptor */
953  if (lp->last_restart == dev->stats.tx_packets) {
954  DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
955  /* Shutdown and restart */
956  i596_reset (dev, lp);
957  } else {
958  /* Issue a channel attention signal */
959  DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
960  lp->dma->scb.command = SWAP16(CUC_START | RX_START);
961  DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
962  ca (dev);
963  lp->last_restart = dev->stats.tx_packets;
964  }
965 
966  dev->trans_start = jiffies; /* prevent tx timeout */
967  netif_wake_queue (dev);
968 }
969 
970 
971 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
972 {
973  struct i596_private *lp = netdev_priv(dev);
974  struct tx_cmd *tx_cmd;
975  struct i596_tbd *tbd;
976  short length = skb->len;
977 
979  "%s: i596_start_xmit(%x,%p) called\n",
980  dev->name, skb->len, skb->data));
981 
982  if (length < ETH_ZLEN) {
983  if (skb_padto(skb, ETH_ZLEN))
984  return NETDEV_TX_OK;
985  length = ETH_ZLEN;
986  }
987 
988  netif_stop_queue(dev);
989 
990  tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
991  tbd = lp->dma->tbds + lp->next_tx_cmd;
992 
993  if (tx_cmd->cmd.command) {
995  "%s: xmit ring full, dropping packet.\n",
996  dev->name));
997  dev->stats.tx_dropped++;
998 
999  dev_kfree_skb(skb);
1000  } else {
1001  if (++lp->next_tx_cmd == TX_RING_SIZE)
1002  lp->next_tx_cmd = 0;
1003  tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1004  tbd->next = I596_NULL;
1005 
1006  tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1007  tx_cmd->skb = skb;
1008 
1009  tx_cmd->pad = 0;
1010  tx_cmd->size = 0;
1011  tbd->pad = 0;
1012  tbd->size = SWAP16(EOF | length);
1013 
1014  tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1015  skb->len, DMA_TO_DEVICE);
1016  tbd->data = SWAP32(tx_cmd->dma_addr);
1017 
1018  DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1019  DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
1020  DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
1021  i596_add_cmd(dev, &tx_cmd->cmd);
1022 
1023  dev->stats.tx_packets++;
1024  dev->stats.tx_bytes += length;
1025  }
1026 
1027  netif_start_queue(dev);
1028 
1029  return NETDEV_TX_OK;
1030 }
1031 
1032 static void print_eth(unsigned char *add, char *str)
1033 {
1034  printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1035  add, add + 6, add, add[12], add[13], str);
1036 }
1037 static const struct net_device_ops i596_netdev_ops = {
1038  .ndo_open = i596_open,
1039  .ndo_stop = i596_close,
1040  .ndo_start_xmit = i596_start_xmit,
1041  .ndo_set_rx_mode = set_multicast_list,
1042  .ndo_tx_timeout = i596_tx_timeout,
1043  .ndo_change_mtu = eth_change_mtu,
1044  .ndo_validate_addr = eth_validate_addr,
1045  .ndo_set_mac_address = eth_mac_addr,
1046 #ifdef CONFIG_NET_POLL_CONTROLLER
1047  .ndo_poll_controller = i596_poll_controller,
1048 #endif
1049 };
1050 
1051 static int __devinit i82596_probe(struct net_device *dev)
1052 {
1053  int i;
1054  struct i596_private *lp = netdev_priv(dev);
1055  struct i596_dma *dma;
1056 
1057  /* This lot ensures things have been cache-line aligned. */
1058  BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1059  BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
1060  BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
1061  BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1062 #ifndef __LP64__
1063  BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
1064 #endif
1065 
1066  if (!dev->base_addr || !dev->irq)
1067  return -ENODEV;
1068 
1069  dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
1070  sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
1071  if (!dma) {
1072  printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1073  return -ENOMEM;
1074  }
1075 
1076  dev->netdev_ops = &i596_netdev_ops;
1077  dev->watchdog_timeo = TX_TIMEOUT;
1078 
1079  memset(dma, 0, sizeof(struct i596_dma));
1080  lp->dma = dma;
1081 
1082  dma->scb.command = 0;
1083  dma->scb.cmd = I596_NULL;
1084  dma->scb.rfd = I596_NULL;
1085  spin_lock_init(&lp->lock);
1086 
1087  DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
1088 
1089  i = register_netdev(dev);
1090  if (i) {
1091  DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
1092  (void *)dma, lp->dma_addr);
1093  return i;
1094  }
1095 
1096  DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1097  dev->name, dev->base_addr, dev->dev_addr,
1098  dev->irq));
1100  "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1101  dev->name, dma, (int)sizeof(struct i596_dma),
1102  &dma->scb));
1103 
1104  return 0;
1105 }
1106 
1107 #ifdef CONFIG_NET_POLL_CONTROLLER
1108 static void i596_poll_controller(struct net_device *dev)
1109 {
1110  disable_irq(dev->irq);
1111  i596_interrupt(dev->irq, dev);
1112  enable_irq(dev->irq);
1113 }
1114 #endif
1115 
1116 static irqreturn_t i596_interrupt(int irq, void *dev_id)
1117 {
1118  struct net_device *dev = dev_id;
1119  struct i596_private *lp;
1120  struct i596_dma *dma;
1121  unsigned short status, ack_cmd = 0;
1122 
1123  lp = netdev_priv(dev);
1124  dma = lp->dma;
1125 
1126  spin_lock (&lp->lock);
1127 
1128  wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1129  status = SWAP16(dma->scb.status);
1130 
1132  "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1133  dev->name, dev->irq, status));
1134 
1135  ack_cmd = status & 0xf000;
1136 
1137  if (!ack_cmd) {
1139  "%s: interrupt with no events\n",
1140  dev->name));
1141  spin_unlock (&lp->lock);
1142  return IRQ_NONE;
1143  }
1144 
1145  if ((status & 0x8000) || (status & 0x2000)) {
1146  struct i596_cmd *ptr;
1147 
1148  if ((status & 0x8000))
1149  DEB(DEB_INTS,
1151  "%s: i596 interrupt completed command.\n",
1152  dev->name));
1153  if ((status & 0x2000))
1154  DEB(DEB_INTS,
1156  "%s: i596 interrupt command unit inactive %x.\n",
1157  dev->name, status & 0x0700));
1158 
1159  while (lp->cmd_head != NULL) {
1160  DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
1161  if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1162  break;
1163 
1164  ptr = lp->cmd_head;
1165 
1166  DEB(DEB_STATUS,
1168  "cmd_head->status = %04x, ->command = %04x\n",
1169  SWAP16(lp->cmd_head->status),
1170  SWAP16(lp->cmd_head->command)));
1171  lp->cmd_head = ptr->v_next;
1172  lp->cmd_backlog--;
1173 
1174  switch (SWAP16(ptr->command) & 0x7) {
1175  case CmdTx:
1176  {
1177  struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1178  struct sk_buff *skb = tx_cmd->skb;
1179 
1180  if (ptr->status & SWAP16(STAT_OK)) {
1181  DEB(DEB_TXADDR,
1182  print_eth(skb->data, "tx-done"));
1183  } else {
1184  dev->stats.tx_errors++;
1185  if (ptr->status & SWAP16(0x0020))
1186  dev->stats.collisions++;
1187  if (!(ptr->status & SWAP16(0x0040)))
1188  dev->stats.tx_heartbeat_errors++;
1189  if (ptr->status & SWAP16(0x0400))
1190  dev->stats.tx_carrier_errors++;
1191  if (ptr->status & SWAP16(0x0800))
1192  dev->stats.collisions++;
1193  if (ptr->status & SWAP16(0x1000))
1194  dev->stats.tx_aborted_errors++;
1195  }
1196  dma_unmap_single(dev->dev.parent,
1197  tx_cmd->dma_addr,
1198  skb->len, DMA_TO_DEVICE);
1199  dev_kfree_skb_irq(skb);
1200 
1201  tx_cmd->cmd.command = 0; /* Mark free */
1202  break;
1203  }
1204  case CmdTDR:
1205  {
1206  unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
1207 
1208  if (status & 0x8000) {
1209  DEB(DEB_ANY,
1210  printk(KERN_DEBUG "%s: link ok.\n",
1211  dev->name));
1212  } else {
1213  if (status & 0x4000)
1215  "%s: Transceiver problem.\n",
1216  dev->name);
1217  if (status & 0x2000)
1219  "%s: Termination problem.\n",
1220  dev->name);
1221  if (status & 0x1000)
1223  "%s: Short circuit.\n",
1224  dev->name);
1225 
1226  DEB(DEB_TDR,
1227  printk(KERN_DEBUG "%s: Time %d.\n",
1228  dev->name, status & 0x07ff));
1229  }
1230  break;
1231  }
1232  case CmdConfigure:
1233  /*
1234  * Zap command so set_multicast_list() knows
1235  * it is free
1236  */
1237  ptr->command = 0;
1238  break;
1239  }
1240  ptr->v_next = NULL;
1241  ptr->b_next = I596_NULL;
1242  DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
1243  lp->last_cmd = jiffies;
1244  }
1245 
1246  /* This mess is arranging that only the last of any outstanding
1247  * commands has the interrupt bit set. Should probably really
1248  * only add to the cmd queue when the CU is stopped.
1249  */
1250  ptr = lp->cmd_head;
1251  while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1252  struct i596_cmd *prev = ptr;
1253 
1254  ptr->command &= SWAP16(0x1fff);
1255  ptr = ptr->v_next;
1256  DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
1257  }
1258 
1259  if (lp->cmd_head != NULL)
1260  ack_cmd |= CUC_START;
1261  dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1262  DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
1263  }
1264  if ((status & 0x1000) || (status & 0x4000)) {
1265  if ((status & 0x4000))
1266  DEB(DEB_INTS,
1268  "%s: i596 interrupt received a frame.\n",
1269  dev->name));
1270  i596_rx(dev);
1271  /* Only RX_START if stopped - RGH 07-07-96 */
1272  if (status & 0x1000) {
1273  if (netif_running(dev)) {
1274  DEB(DEB_ERRORS,
1276  "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1277  dev->name, status));
1278  ack_cmd |= RX_START;
1279  dev->stats.rx_errors++;
1280  dev->stats.rx_fifo_errors++;
1281  rebuild_rx_bufs(dev);
1282  }
1283  }
1284  }
1285  wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1286  dma->scb.command = SWAP16(ack_cmd);
1287  DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
1288 
1289  /* DANGER: I suspect that some kind of interrupt
1290  acknowledgement aside from acking the 82596 might be needed
1291  here... but it's running acceptably without */
1292 
1293  ca(dev);
1294 
1295  wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
1296  DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1297 
1298  spin_unlock (&lp->lock);
1299  return IRQ_HANDLED;
1300 }
1301 
1302 static int i596_close(struct net_device *dev)
1303 {
1304  struct i596_private *lp = netdev_priv(dev);
1305  unsigned long flags;
1306 
1307  netif_stop_queue(dev);
1308 
1309  DEB(DEB_INIT,
1311  "%s: Shutting down ethercard, status was %4.4x.\n",
1312  dev->name, SWAP16(lp->dma->scb.status)));
1313 
1314  spin_lock_irqsave(&lp->lock, flags);
1315 
1316  wait_cmd(dev, lp->dma, 100, "close1 timed out");
1317  lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1318  DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
1319 
1320  ca(dev);
1321 
1322  wait_cmd(dev, lp->dma, 100, "close2 timed out");
1323  spin_unlock_irqrestore(&lp->lock, flags);
1324  DEB(DEB_STRUCT, i596_display_data(dev));
1325  i596_cleanup_cmd(dev, lp);
1326 
1327  free_irq(dev->irq, dev);
1328  remove_rx_bufs(dev);
1329 
1330  return 0;
1331 }
1332 
1333 /*
1334  * Set or clear the multicast filter for this adaptor.
1335  */
1336 
1337 static void set_multicast_list(struct net_device *dev)
1338 {
1339  struct i596_private *lp = netdev_priv(dev);
1340  struct i596_dma *dma = lp->dma;
1341  int config = 0, cnt;
1342 
1343  DEB(DEB_MULTI,
1345  "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1346  dev->name, netdev_mc_count(dev),
1347  dev->flags & IFF_PROMISC ? "ON" : "OFF",
1348  dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1349 
1350  if ((dev->flags & IFF_PROMISC) &&
1351  !(dma->cf_cmd.i596_config[8] & 0x01)) {
1352  dma->cf_cmd.i596_config[8] |= 0x01;
1353  config = 1;
1354  }
1355  if (!(dev->flags & IFF_PROMISC) &&
1356  (dma->cf_cmd.i596_config[8] & 0x01)) {
1357  dma->cf_cmd.i596_config[8] &= ~0x01;
1358  config = 1;
1359  }
1360  if ((dev->flags & IFF_ALLMULTI) &&
1361  (dma->cf_cmd.i596_config[11] & 0x20)) {
1362  dma->cf_cmd.i596_config[11] &= ~0x20;
1363  config = 1;
1364  }
1365  if (!(dev->flags & IFF_ALLMULTI) &&
1366  !(dma->cf_cmd.i596_config[11] & 0x20)) {
1367  dma->cf_cmd.i596_config[11] |= 0x20;
1368  config = 1;
1369  }
1370  if (config) {
1371  if (dma->cf_cmd.cmd.command)
1373  "%s: config change request already queued\n",
1374  dev->name);
1375  else {
1376  dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1377  DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1378  i596_add_cmd(dev, &dma->cf_cmd.cmd);
1379  }
1380  }
1381 
1382  cnt = netdev_mc_count(dev);
1383  if (cnt > MAX_MC_CNT) {
1384  cnt = MAX_MC_CNT;
1385  printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
1386  dev->name, cnt);
1387  }
1388 
1389  if (!netdev_mc_empty(dev)) {
1390  struct netdev_hw_addr *ha;
1391  unsigned char *cp;
1392  struct mc_cmd *cmd;
1393 
1394  cmd = &dma->mc_cmd;
1395  cmd->cmd.command = SWAP16(CmdMulticastList);
1396  cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
1397  cp = cmd->mc_addrs;
1398  netdev_for_each_mc_addr(ha, dev) {
1399  if (!cnt--)
1400  break;
1401  memcpy(cp, ha->addr, 6);
1402  if (i596_debug > 1)
1403  DEB(DEB_MULTI,
1405  "%s: Adding address %pM\n",
1406  dev->name, cp));
1407  cp += 6;
1408  }
1409  DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1410  i596_add_cmd(dev, &cmd->cmd);
1411  }
1412 }