sgiseeq.c
1 /*
2  * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
3  *
4  * Copyright (C) 1996 David S. Miller ([email protected])
5  */
6 
7 #undef DEBUG
8 
9 #include <linux/dma-mapping.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/errno.h>
14 #include <linux/init.h>
15 #include <linux/types.h>
16 #include <linux/interrupt.h>
17 #include <linux/string.h>
18 #include <linux/delay.h>
19 #include <linux/netdevice.h>
20 #include <linux/platform_device.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 
24 #include <asm/sgi/hpc3.h>
25 #include <asm/sgi/ip22.h>
26 #include <asm/sgi/seeq.h>
27 
28 #include "sgiseeq.h"
29 
30 static char *sgiseeqstr = "SGI Seeq8003";
31 
32 /*
33  * If you want speed, you do something silly, it always has worked for me. So,
34  * with that in mind, I've decided to make this driver look completely like a
35  * stupid Lance from a driver architecture perspective. Only difference is that
36  * here our "ring buffer" looks and acts like a real Lance one does but is
37  * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
38  * how a stupid idea like this can pay off in performance, not to mention
39  * making this driver 2,000 times easier to write. ;-)
40  */
41 
42 /* Tune these if we tend to run out often etc. */
43 #define SEEQ_RX_BUFFERS 16
44 #define SEEQ_TX_BUFFERS 16
45 
46 #define PKT_BUF_SZ 1584
47 
48 #define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
49 #define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
50 #define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
51 #define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
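/* The wrap-around above relies on SEEQ_RX_BUFFERS and SEEQ_TX_BUFFERS being
 * powers of two, so masking with (n - 1) is a cheap modulo, e.g.
 * NEXT_TX(15) == 0 with 16 buffers. */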
52 
53 #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
54  sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
55  sp->tx_old - sp->tx_new - 1)
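/* Note that one TX descriptor is always left unused: with tx_old == tx_new
 * (empty ring) this evaluates to SEEQ_TX_BUFFERS - 1, so a "full" ring can
 * never be confused with an empty one. */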
56 
57 #define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
58  (dma_addr_t)((unsigned long)(v) - \
59  (unsigned long)((sp)->rx_desc)))
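/* Translate a descriptor's kernel virtual address into the matching bus
 * address inside the descriptor block: rx_desc is the start of the block
 * and srings_dma is the block's DMA handle. */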
60 
61 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
62  * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
63  */
64 static int rx_copybreak = 100;
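/* For short frames the original full-sized buffer stays in the ring and only
 * len bytes are copied into a small skb; long frames are passed up directly
 * and the ring slot is refilled with a fresh buffer. */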
65 
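/* Pad each ring descriptor (DMA descriptor + skb pointer) out to 128 bytes,
 * presumably so that cache maintenance on one descriptor cannot touch its
 * neighbours. */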
66 #define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
67 
68 struct sgiseeq_rx_desc {
69  volatile struct hpc_dma_desc rdma;
70  u8 padding[PAD_SIZE];
71  struct sk_buff *skb;
72 };
73 
74 struct sgiseeq_tx_desc {
75  volatile struct hpc_dma_desc tdma;
76  u8 padding[PAD_SIZE];
77  struct sk_buff *skb;
78 };
79 
80 /*
81  * Warning: This structure is laid out in a certain way because HPC dma
82  * descriptors must be 8-byte aligned. So don't touch this without
83  * some care.
84  */
85 struct sgiseeq_init_block { /* Note the name ;-) */
86  struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
87  struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
88 };
89 
90 struct sgiseeq_private {
91  struct sgiseeq_init_block *srings;
92  dma_addr_t srings_dma;
93 
94  /* Ptrs to the descriptors in uncached space. */
95  struct sgiseeq_rx_desc *rx_desc;
96  struct sgiseeq_tx_desc *tx_desc;
97 
98  char *name;
99  struct hpc3_ethregs *hregs;
100  struct sgiseeq_regs *sregs;
101 
102  /* Ring entry counters. */
103  unsigned int rx_new, tx_new;
104  unsigned int rx_old, tx_old;
105 
106  int is_edlc;
107  unsigned char control;
108  unsigned char mode;
109 
110  spinlock_t tx_lock;
111 };
112 
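/*
 * The descriptor rings live in memory obtained from dma_alloc_noncoherent()
 * (see sgiseeq_probe() below), so every CPU access is bracketed with
 * dma_cache_sync(): dma_sync_desc_cpu() before the CPU reads a descriptor
 * the HPC may have written, dma_sync_desc_dev() after the CPU updates a
 * descriptor that is handed back to the HPC.
 */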
113 static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
114 {
115  dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
116  DMA_FROM_DEVICE);
117 }
118 
119 static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
120 {
121  dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
122  DMA_TO_DEVICE);
123 }
124 
125 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
126 {
127  hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
128  udelay(20);
129  hregs->reset = 0;
130 }
131 
132 static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
133  struct sgiseeq_regs *sregs)
134 {
135  hregs->rx_ctrl = hregs->tx_ctrl = 0;
136  hpc3_eth_reset(hregs);
137 }
138 
139 #define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
140  SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
141 
142 static inline void seeq_go(struct sgiseeq_private *sp,
143  struct hpc3_ethregs *hregs,
144  struct sgiseeq_regs *sregs)
145 {
146  sregs->rstat = sp->mode | RSTAT_GO_BITS;
147  hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
148 }
149 
150 static inline void __sgiseeq_set_mac_address(struct net_device *dev)
151 {
152  struct sgiseeq_private *sp = netdev_priv(dev);
153  struct sgiseeq_regs *sregs = sp->sregs;
154  int i;
155 
156  sregs->tstat = SEEQ_TCMD_RB0;
157  for (i = 0; i < 6; i++)
158  sregs->rw.eth_addr[i] = dev->dev_addr[i];
159 }
160 
161 static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
162 {
163  struct sgiseeq_private *sp = netdev_priv(dev);
164  struct sockaddr *sa = addr;
165 
166  memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
167 
168  spin_lock_irq(&sp->tx_lock);
169  __sgiseeq_set_mac_address(dev);
170  spin_unlock_irq(&sp->tx_lock);
171 
172  return 0;
173 }
174 
175 #define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
176 #define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
177 #define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
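/* RX descriptors start out with HPCDMA_OWN set and the full buffer size in
 * the BCNT field; sgiseeq_rx() treats a cleared OWN bit as "frame received".
 * TX descriptors start out marked HPCDMA_EOX | HPCDMA_ETXD, i.e. end of
 * chain with nothing queued for transmit. */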
178 
179 static int seeq_init_ring(struct net_device *dev)
180 {
181  struct sgiseeq_private *sp = netdev_priv(dev);
182  int i;
183 
184  netif_stop_queue(dev);
185  sp->rx_new = sp->tx_new = 0;
186  sp->rx_old = sp->tx_old = 0;
187 
188  __sgiseeq_set_mac_address(dev);
189 
190  /* Setup tx ring. */
191  for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
192  sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
193  dma_sync_desc_dev(dev, &sp->tx_desc[i]);
194  }
195 
196  /* And now the rx ring. */
197  for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
198  if (!sp->rx_desc[i].skb) {
199  dma_addr_t dma_addr;
200  struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
201 
202  if (skb == NULL)
203  return -ENOMEM;
204  skb_reserve(skb, 2);
205  dma_addr = dma_map_single(dev->dev.parent,
206  skb->data - 2,
207  PKT_BUF_SZ, DMA_FROM_DEVICE);
208  sp->rx_desc[i].skb = skb;
209  sp->rx_desc[i].rdma.pbuf = dma_addr;
210  }
211  sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
212  dma_sync_desc_dev(dev, &sp->rx_desc[i]);
213  }
214  sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
215  dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
216  return 0;
217 }
218 
219 static void seeq_purge_ring(struct net_device *dev)
220 {
221  struct sgiseeq_private *sp = netdev_priv(dev);
222  int i;
223 
224  /* clear tx ring. */
225  for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
226  if (sp->tx_desc[i].skb) {
227  dev_kfree_skb(sp->tx_desc[i].skb);
228  sp->tx_desc[i].skb = NULL;
229  }
230  }
231 
232  /* And now the rx ring. */
233  for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
234  if (sp->rx_desc[i].skb) {
235  dev_kfree_skb(sp->rx_desc[i].skb);
236  sp->rx_desc[i].skb = NULL;
237  }
238  }
239 }
240 
241 #ifdef DEBUG
242 static struct sgiseeq_private *gpriv;
243 static struct net_device *gdev;
244 
245 static void sgiseeq_dump_rings(void)
246 {
247  static int once;
248  struct sgiseeq_rx_desc *r = gpriv->rx_desc;
249  struct sgiseeq_tx_desc *t = gpriv->tx_desc;
250  struct hpc3_ethregs *hregs = gpriv->hregs;
251  int i;
252 
253  if (once)
254  return;
255  once++;
256  printk("RING DUMP:\n");
257  for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
258  printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
259  i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
260  r[i].rdma.pnext);
261  i += 1;
262  printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
263  i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
264  r[i].rdma.pnext);
265  }
266  for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
267  printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
268  i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
269  t[i].tdma.pnext);
270  i += 1;
271  printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
272  i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
273  t[i].tdma.pnext);
274  }
275  printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
276  gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
277  printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
278  hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
279  printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
280  hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
281 }
282 #endif
283 
284 #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
285 #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
286 
287 static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
288  struct sgiseeq_regs *sregs)
289 {
290  struct hpc3_ethregs *hregs = sp->hregs;
291  int err;
292 
293  reset_hpc3_and_seeq(hregs, sregs);
294  err = seeq_init_ring(dev);
295  if (err)
296  return err;
297 
298  /* Setup to field the proper interrupt types. */
299  if (sp->is_edlc) {
300  sregs->tstat = TSTAT_INIT_EDLC;
301  sregs->rw.wregs.control = sp->control;
302  sregs->rw.wregs.frame_gap = 0;
303  } else {
304  sregs->tstat = TSTAT_INIT_SEEQ;
305  }
306 
307  hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
308  hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);
309 
310  seeq_go(sp, hregs, sregs);
311  return 0;
312 }
313 
314 static void record_rx_errors(struct net_device *dev, unsigned char status)
315 {
316  if (status & SEEQ_RSTAT_OVERF ||
317  status & SEEQ_RSTAT_SFRAME)
318  dev->stats.rx_over_errors++;
319  if (status & SEEQ_RSTAT_CERROR)
320  dev->stats.rx_crc_errors++;
321  if (status & SEEQ_RSTAT_DERROR)
322  dev->stats.rx_frame_errors++;
323  if (status & SEEQ_RSTAT_REOF)
324  dev->stats.rx_errors++;
325 }
326 
327 static inline void rx_maybe_restart(struct sgiseeq_private *sp,
328  struct hpc3_ethregs *hregs,
329  struct sgiseeq_regs *sregs)
330 {
331  if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
332  hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
333  seeq_go(sp, hregs, sregs);
334  }
335 }
336 
337 static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
338  struct hpc3_ethregs *hregs,
339  struct sgiseeq_regs *sregs)
340 {
341  struct sgiseeq_rx_desc *rd;
342  struct sk_buff *skb = NULL;
343  struct sk_buff *newskb;
344  unsigned char pkt_status;
345  int len = 0;
346  unsigned int orig_end = PREV_RX(sp->rx_new);
347 
348  /* Service every received packet. */
349  rd = &sp->rx_desc[sp->rx_new];
350  dma_sync_desc_cpu(dev, rd);
351  while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
352  len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
353  dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
354  PKT_BUF_SZ, DMA_FROM_DEVICE);
355  pkt_status = rd->skb->data[len];
356  if (pkt_status & SEEQ_RSTAT_FIG) {
357  /* Packet is OK. */
358  /* We don't want to receive our own packets */
359  if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
360  if (len > rx_copybreak) {
361  skb = rd->skb;
362  newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
363  if (!newskb) {
364  newskb = skb;
365  skb = NULL;
366  goto memory_squeeze;
367  }
368  skb_reserve(newskb, 2);
369  } else {
370  skb = netdev_alloc_skb_ip_align(dev, len);
371  if (skb)
372  skb_copy_to_linear_data(skb, rd->skb->data, len);
373 
374  newskb = rd->skb;
375  }
376 memory_squeeze:
377  if (skb) {
378  skb_put(skb, len);
379  skb->protocol = eth_type_trans(skb, dev);
380  netif_rx(skb);
381  dev->stats.rx_packets++;
382  dev->stats.rx_bytes += len;
383  } else {
384  printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
385  dev->name);
386  dev->stats.rx_dropped++;
387  }
388  } else {
389  /* Silently drop my own packets */
390  newskb = rd->skb;
391  }
392  } else {
393  record_rx_errors(dev, pkt_status);
394  newskb = rd->skb;
395  }
396  rd->skb = newskb;
397  rd->rdma.pbuf = dma_map_single(dev->dev.parent,
398  newskb->data - 2,
399  PKT_BUF_SZ, DMA_FROM_DEVICE);
400 
401  /* Return the entry to the ring pool. */
402  rd->rdma.cntinfo = RCNTINFO_INIT;
403  sp->rx_new = NEXT_RX(sp->rx_new);
404  dma_sync_desc_dev(dev, rd);
405  rd = &sp->rx_desc[sp->rx_new];
406  dma_sync_desc_cpu(dev, rd);
407  }
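 /*
  * Move the end-of-ring marker: clear HPCDMA_EOR on the descriptor that
  * used to be last and set it on the new last one (just before rx_new),
  * then restart the receiver if the HPC has gone idle.
  */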
408  dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
409  sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
410  dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
411  dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
412  sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
413  dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
414  rx_maybe_restart(sp, hregs, sregs);
415 }
416 
417 static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
418  struct sgiseeq_regs *sregs)
419 {
420  if (sp->is_edlc) {
421  sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
422  sregs->rw.wregs.control = sp->control;
423  }
424 }
425 
426 static inline void kick_tx(struct net_device *dev,
427  struct sgiseeq_private *sp,
428  struct hpc3_ethregs *hregs)
429 {
430  struct sgiseeq_tx_desc *td;
431  int i = sp->tx_old;
432 
433  /* If the HPC aint doin nothin, and there are more packets
434  * with ETXD cleared and XIU set we must make very certain
435  * that we restart the HPC else we risk locking up the
436  * adapter. The following code is only safe iff the HPCDMA
437  * is not active!
438  */
439  td = &sp->tx_desc[i];
440  dma_sync_desc_cpu(dev, td);
441  while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
442  (HPCDMA_XIU | HPCDMA_ETXD)) {
443  i = NEXT_TX(i);
444  td = &sp->tx_desc[i];
445  dma_sync_desc_cpu(dev, td);
446  }
447  if (td->tdma.cntinfo & HPCDMA_XIU) {
448  hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
449  hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
450  }
451 }
452 
453 static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
454  struct hpc3_ethregs *hregs,
455  struct sgiseeq_regs *sregs)
456 {
457  struct sgiseeq_tx_desc *td;
458  unsigned long status = hregs->tx_ctrl;
459  int j;
460 
461  tx_maybe_reset_collisions(sp, sregs);
462 
463  if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
464  /* Oops, HPC detected some sort of error. */
465  if (status & SEEQ_TSTAT_R16)
466  dev->stats.tx_aborted_errors++;
467  if (status & SEEQ_TSTAT_UFLOW)
468  dev->stats.tx_fifo_errors++;
469  if (status & SEEQ_TSTAT_LCLS)
470  dev->stats.collisions++;
471  }
472 
473  /* Ack 'em... */
474  for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
475  td = &sp->tx_desc[j];
476 
477  dma_sync_desc_cpu(dev, td);
478  if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
479  break;
480  if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
481  if (!(status & HPC3_ETXCTRL_ACTIVE)) {
482  hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
483  hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
484  }
485  break;
486  }
487  dev->stats.tx_packets++;
488  sp->tx_old = NEXT_TX(sp->tx_old);
489  td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
490  td->tdma.cntinfo |= HPCDMA_EOX;
491  if (td->skb) {
492  dev_kfree_skb_any(td->skb);
493  td->skb = NULL;
494  }
495  dma_sync_desc_dev(dev, td);
496  }
497 }
498 
499 static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
500 {
501  struct net_device *dev = (struct net_device *) dev_id;
502  struct sgiseeq_private *sp = netdev_priv(dev);
503  struct hpc3_ethregs *hregs = sp->hregs;
504  struct sgiseeq_regs *sregs = sp->sregs;
505 
506  spin_lock(&sp->tx_lock);
507 
508  /* Ack the IRQ and set software state. */
509  hregs->reset = HPC3_ERST_CLRIRQ;
510 
511  /* Always check for received packets. */
512  sgiseeq_rx(dev, sp, hregs, sregs);
513 
514  /* Only check for tx acks if we have something queued. */
515  if (sp->tx_old != sp->tx_new)
516  sgiseeq_tx(dev, sp, hregs, sregs);
517 
518  if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
519  netif_wake_queue(dev);
520  }
521  spin_unlock(&sp->tx_lock);
522 
523  return IRQ_HANDLED;
524 }
525 
526 static int sgiseeq_open(struct net_device *dev)
527 {
528  struct sgiseeq_private *sp = netdev_priv(dev);
529  struct sgiseeq_regs *sregs = sp->sregs;
530  unsigned int irq = dev->irq;
531  int err;
532 
533  if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
534  printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
535  return -EAGAIN;
536  }
537 
538  err = init_seeq(dev, sp, sregs);
539  if (err)
540  goto out_free_irq;
541 
542  netif_start_queue(dev);
543 
544  return 0;
545 
546 out_free_irq:
547  free_irq(irq, dev);
548 
549  return err;
550 }
551 
552 static int sgiseeq_close(struct net_device *dev)
553 {
554  struct sgiseeq_private *sp = netdev_priv(dev);
555  struct sgiseeq_regs *sregs = sp->sregs;
556  unsigned int irq = dev->irq;
557 
558  netif_stop_queue(dev);
559 
560  /* Shutdown the Seeq. */
561  reset_hpc3_and_seeq(sp->hregs, sregs);
562  free_irq(irq, dev);
563  seeq_purge_ring(dev);
564 
565  return 0;
566 }
567 
568 static inline int sgiseeq_reset(struct net_device *dev)
569 {
570  struct sgiseeq_private *sp = netdev_priv(dev);
571  struct sgiseeq_regs *sregs = sp->sregs;
572  int err;
573 
574  err = init_seeq(dev, sp, sregs);
575  if (err)
576  return err;
577 
578  dev->trans_start = jiffies; /* prevent tx timeout */
579  netif_wake_queue(dev);
580 
581  return 0;
582 }
583 
584 static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
585 {
586  struct sgiseeq_private *sp = netdev_priv(dev);
587  struct hpc3_ethregs *hregs = sp->hregs;
588  unsigned long flags;
589  struct sgiseeq_tx_desc *td;
590  int len, entry;
591 
592  spin_lock_irqsave(&sp->tx_lock, flags);
593 
594  /* Setup... */
595  len = skb->len;
596  if (len < ETH_ZLEN) {
597  if (skb_padto(skb, ETH_ZLEN)) {
598  spin_unlock_irqrestore(&sp->tx_lock, flags);
599  return NETDEV_TX_OK;
600  }
601  len = ETH_ZLEN;
602  }
603 
604  dev->stats.tx_bytes += len;
605  entry = sp->tx_new;
606  td = &sp->tx_desc[entry];
607  dma_sync_desc_cpu(dev, td);
608 
609  /* Create entry. There are so many races with adding a new
610  * descriptor to the chain:
611  * 1) Assume that the HPC is off processing a DMA chain while
612  * we are changing all of the following.
613  * 2) Do not allow the HPC to look at a new descriptor until
614  * we have completely set up its state. This means, do
615  * not clear HPCDMA_EOX in the current last descriptor
616  * until the one we are adding looks consistent and could
617  * be processed right now.
618  * 3) The tx interrupt code must notice when we've added a new
619  * entry and the HPC got to the end of the chain before we
620  * added this new entry and restarted it.
621  */
622  td->skb = skb;
623  td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
624  len, DMA_TO_DEVICE);
625  td->tdma.cntinfo = (len & HPCDMA_BCNT) |
626  HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
627  dma_sync_desc_dev(dev, td);
628  if (sp->tx_old != sp->tx_new) {
629  struct sgiseeq_tx_desc *backend;
630 
631  backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
632  dma_sync_desc_cpu(dev, backend);
633  backend->tdma.cntinfo &= ~HPCDMA_EOX;
634  dma_sync_desc_dev(dev, backend);
635  }
636  sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
637 
638  /* Maybe kick the HPC back into motion. */
639  if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
640  kick_tx(dev, sp, hregs);
641 
642  if (!TX_BUFFS_AVAIL(sp))
643  netif_stop_queue(dev);
644  spin_unlock_irqrestore(&sp->tx_lock, flags);
645 
646  return NETDEV_TX_OK;
647 }
648 
649 static void timeout(struct net_device *dev)
650 {
651  printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
652  sgiseeq_reset(dev);
653 
654  dev->trans_start = jiffies; /* prevent tx timeout */
655  netif_wake_queue(dev);
656 }
657 
658 static void sgiseeq_set_multicast(struct net_device *dev)
659 {
660  struct sgiseeq_private *sp = netdev_priv(dev);
661  unsigned char oldmode = sp->mode;
662 
663  if(dev->flags & IFF_PROMISC)
664  sp->mode = SEEQ_RCMD_RANY;
665  else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
666  sp->mode = SEEQ_RCMD_RBMCAST;
667  else
668  sp->mode = SEEQ_RCMD_RBCAST;
669 
670  /* XXX I know this sucks, but is there a better way to reprogram
671  * XXX the receiver? At least, this shouldn't happen too often.
672  */
673 
674  if (oldmode != sp->mode)
675  sgiseeq_reset(dev);
676 }
677 
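/*
 * setup_tx_ring()/setup_rx_ring() chain the descriptors of each ring into a
 * circle through the hardware pnext pointers: each entry points at the next
 * one and the last entry points back at the first.
 */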
678 static inline void setup_tx_ring(struct net_device *dev,
679  struct sgiseeq_tx_desc *buf,
680  int nbufs)
681 {
682  struct sgiseeq_private *sp = netdev_priv(dev);
683  int i = 0;
684 
685  while (i < (nbufs - 1)) {
686  buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
687  buf[i].tdma.pbuf = 0;
688  dma_sync_desc_dev(dev, &buf[i]);
689  i++;
690  }
691  buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
692  dma_sync_desc_dev(dev, &buf[i]);
693 }
694 
695 static inline void setup_rx_ring(struct net_device *dev,
696  struct sgiseeq_rx_desc *buf,
697  int nbufs)
698 {
699  struct sgiseeq_private *sp = netdev_priv(dev);
700  int i = 0;
701 
702  while (i < (nbufs - 1)) {
703  buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
704  buf[i].rdma.pbuf = 0;
705  dma_sync_desc_dev(dev, &buf[i]);
706  i++;
707  }
708  buf[i].rdma.pbuf = 0;
709  buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
710  dma_sync_desc_dev(dev, &buf[i]);
711 }
712 
713 static const struct net_device_ops sgiseeq_netdev_ops = {
714  .ndo_open = sgiseeq_open,
715  .ndo_stop = sgiseeq_close,
716  .ndo_start_xmit = sgiseeq_start_xmit,
717  .ndo_tx_timeout = timeout,
718  .ndo_set_rx_mode = sgiseeq_set_multicast,
719  .ndo_set_mac_address = sgiseeq_set_mac_address,
720  .ndo_change_mtu = eth_change_mtu,
721  .ndo_validate_addr = eth_validate_addr,
722 };
723 
724 static int __devinit sgiseeq_probe(struct platform_device *pdev)
725 {
726  struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
727  struct hpc3_regs *hpcregs = pd->hpc;
728  struct sgiseeq_init_block *sr;
729  unsigned int irq = pd->irq;
730  struct sgiseeq_private *sp;
731  struct net_device *dev;
732  int err;
733 
734  dev = alloc_etherdev(sizeof (struct sgiseeq_private));
735  if (!dev) {
736  err = -ENOMEM;
737  goto err_out;
738  }
739 
740  platform_set_drvdata(pdev, dev);
741  sp = netdev_priv(dev);
742 
743  /* Make private data page aligned */
744  sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
745  &sp->srings_dma, GFP_KERNEL);
746  if (!sr) {
747  printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
748  err = -ENOMEM;
749  goto err_out_free_dev;
750  }
751  sp->srings = sr;
752  sp->rx_desc = sp->srings->rxvector;
753  sp->tx_desc = sp->srings->txvector;
754  spin_lock_init(&sp->tx_lock);
755 
756  /* A couple calculations now, saves many cycles later. */
757  setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
758  setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
759 
760  memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
761 
762 #ifdef DEBUG
763  gpriv = sp;
764  gdev = dev;
765 #endif
766  sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
767  sp->hregs = &hpcregs->ethregs;
768  sp->name = sgiseeqstr;
769  sp->mode = SEEQ_RCMD_RBCAST;
770 
771  /* Setup PIO and DMA transfer timing */
772  sp->hregs->pconfig = 0x161;
773  sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
774  HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
775 
776  /* Setup PIO and DMA transfer timing */
777  sp->hregs->pconfig = 0x161;
778  sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
779  HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
780 
781  /* Reset the chip. */
782  hpc3_eth_reset(sp->hregs);
783 
784  sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
785  if (sp->is_edlc)
786  sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
787  SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
788  SEEQ_CTRL_ENCARR;
789 
790  dev->netdev_ops = &sgiseeq_netdev_ops;
791  dev->watchdog_timeo = (200 * HZ) / 1000;
792  dev->irq = irq;
793 
794  if (register_netdev(dev)) {
795  printk(KERN_ERR "Sgiseeq: Cannot register net device, "
796  "aborting.\n");
797  err = -ENODEV;
798  goto err_out_free_page;
799  }
800 
801  printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
802 
803  return 0;
804 
805 err_out_free_page:
806  free_page((unsigned long) sp->srings);
807 err_out_free_dev:
808  free_netdev(dev);
809 
810 err_out:
811  return err;
812 }
813 
814 static int __exit sgiseeq_remove(struct platform_device *pdev)
815 {
816  struct net_device *dev = platform_get_drvdata(pdev);
817  struct sgiseeq_private *sp = netdev_priv(dev);
818 
819  unregister_netdev(dev);
820  dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
821  sp->srings_dma);
822  free_netdev(dev);
823  platform_set_drvdata(pdev, NULL);
824 
825  return 0;
826 }
827 
828 static struct platform_driver sgiseeq_driver = {
829  .probe = sgiseeq_probe,
830  .remove = __exit_p(sgiseeq_remove),
831  .driver = {
832  .name = "sgiseeq",
833  .owner = THIS_MODULE,
834  }
835 };
836 
837 module_platform_driver(sgiseeq_driver);
838 
839 MODULE_DESCRIPTION("SGI Seeq 8003 driver");
840 MODULE_AUTHOR("Linux/MIPS Mailing List <[email protected]>");
841 MODULE_LICENSE("GPL");
842 MODULE_ALIAS("platform:sgiseeq");