/*
 * meth.c -- O2 Builtin 10/100 Ethernet driver
 *
 * Copyright (C) 2001-2003 Ilya Volynets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/device.h>       /* struct device, et al */
#include <linux/netdevice.h>    /* struct net_device, and other headers */
#include <linux/etherdevice.h>  /* eth_type_trans */
#include <linux/ip.h>           /* struct iphdr */
#include <linux/tcp.h>          /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h>          /* MII definitions */
#include <linux/crc32.h>

#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>

#include <asm/io.h>

#include "meth.h"

#ifndef MFE_DEBUG
#define MFE_DEBUG 0
#endif

#if MFE_DEBUG >= 1
#define DPRINTK(str, args...) printk(KERN_DEBUG "meth: %s: " str, __func__, ## args)
#define MFE_RX_DEBUG 2
#else
#define DPRINTK(str, args...)
#define MFE_RX_DEBUG 0
#endif
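
/*
 * MFE_DEBUG may be overridden at build time (e.g. -DMFE_DEBUG=1) to
 * enable the DPRINTK() tracing above; MFE_DEBUG >= 2 additionally
 * prints the PHY model detected in mdio_probe() below.
 */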

static const char *meth_str = "SGI O2 Fast Ethernet";

/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)

static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
 */
#define METH_MCF_LIMIT 32

/*
 * This structure is private to each device. It is used to pass
 * packets in and out, so there is a place for a packet.
 */
struct meth_private {
        /* in-memory copy of MAC Control register */
        u64 mac_ctrl;

        /* in-memory copy of DMA Control register */
        unsigned long dma_ctrl;
        /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
        unsigned long phy_addr;

        tx_packet *tx_ring;
        dma_addr_t tx_ring_dma;
        struct sk_buff *tx_skbs[TX_RING_ENTRIES];
        dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
        unsigned long tx_read, tx_write, tx_count;

        rx_packet *rx_ring[RX_RING_ENTRIES];
        dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
        struct sk_buff *rx_skbs[RX_RING_ENTRIES];
        unsigned long rx_write;

        /* Multicast filter. */
        u64 mcast_filter;

        spinlock_t meth_lock;
};
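
/*
 * Ring bookkeeping: tx_read is the oldest in-flight descriptor,
 * tx_write the next free slot, and tx_count the number in flight;
 * both indices wrap with a power-of-two mask (TX_RING_ENTRIES - 1).
 * On the RX side, rx_write names the next ring slot the hardware
 * will fill, and each slot's rx_packet header lives at the head of
 * the matching rx_skbs[] buffer.
 */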

static void meth_tx_timeout(struct net_device *dev);
static irqreturn_t meth_interrupt(int irq, void *dev_id);

/* global, initialized in ip32-setup.c */
char o2meth_eaddr[8] = {0, 0, 0, 0, 0, 0, 0, 0};

static inline void load_eaddr(struct net_device *dev)
{
        int i;
        u64 macaddr;

        DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
        macaddr = 0;
        for (i = 0; i < 6; i++)
                macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);

        mace->eth.mac_addr = macaddr;
}

/*
 * Waits for BUSY status of mdio bus to clear
 */
#define WAIT_FOR_PHY(___rval)                                   \
        while ((___rval = mace->eth.phy_data) & MDIO_BUSY) {    \
                udelay(25);                                     \
        }

/* Read a PHY register; returns the value read. */
static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
{
        unsigned long rval;

        WAIT_FOR_PHY(rval);
        mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
        udelay(25);
        mace->eth.phy_trans_go = 1;
        udelay(25);
        WAIT_FOR_PHY(rval);
        return rval & MDIO_DATA_MASK;
}

static int mdio_probe(struct meth_private *priv)
{
        int i;
        unsigned long p2, p3, flags;

        /* check if phy is detected already */
        if (priv->phy_addr >= 0 && priv->phy_addr < 32)
                return 0;

        spin_lock_irqsave(&priv->meth_lock, flags);
        for (i = 0; i < 32; ++i) {
                priv->phy_addr = i;
                p2 = mdio_read(priv, 2);
                p3 = mdio_read(priv, 3);
#if MFE_DEBUG >= 2
                switch ((p2 << 12) | (p3 >> 4)) {
                case PHY_QS6612X:
                        DPRINTK("PHY is QS6612X\n");
                        break;
                case PHY_ICS1889:
                        DPRINTK("PHY is ICS1889\n");
                        break;
                case PHY_ICS1890:
                        DPRINTK("PHY is ICS1890\n");
                        break;
                case PHY_DP83840:
                        DPRINTK("PHY is DP83840\n");
                        break;
                }
#endif
                if (p2 != 0xffff && p2 != 0x0000) {
                        DPRINTK("PHY code: %x\n", (p2 << 12) | (p3 >> 4));
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->meth_lock, flags);
        if (priv->phy_addr < 32)
                return 0;

        DPRINTK("Oopsie! PHY is not known!\n");
        priv->phy_addr = -1;
        return -ENODEV;
}

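/*
 * Autonegotiation decoding below follows the standard MII register
 * layout: register 4 is what we advertise and register 5 what the
 * link partner advertises, so their AND is the negotiated ability
 * mask. Bits 0x0380 cover the 100Mbit modes (100BASE-TX half/full
 * and 100BASE-T4); 0x0100 is 100BASE-TX full duplex, and
 * (negotiated & 0x01C0) == 0x0040 means only 10BASE-T full duplex
 * remains.
 */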
static void meth_check_link(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long mii_advertising = mdio_read(priv, 4);
        unsigned long mii_partner = mdio_read(priv, 5);
        unsigned long negotiated = mii_advertising & mii_partner;
        unsigned long duplex, speed;

        if (mii_partner == 0xffff)
                return;

        speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
        duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
                 METH_PHY_FDX : 0;

        if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
                DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
                if (duplex)
                        priv->mac_ctrl |= METH_PHY_FDX;
                else
                        priv->mac_ctrl &= ~METH_PHY_FDX;
                mace->eth.mac_ctrl = priv->mac_ctrl;
        }

        if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
                DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
                if (speed)
                        priv->mac_ctrl |= METH_100MBIT;
                else
                        priv->mac_ctrl &= ~METH_100MBIT;
                mace->eth.mac_ctrl = priv->mac_ctrl;
        }
}

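/*
 * TX_RING_ENTRIES is assumed to be a power of two throughout this
 * file: ring indices advance with "& (TX_RING_ENTRIES - 1)", and
 * ADVANCE_RX_PTR() from meth.h is assumed to apply the same mask to
 * the RX ring index.
 */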

static int meth_init_tx_ring(struct meth_private *priv)
{
        /* Init TX ring */
        priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
                                           &priv->tx_ring_dma, GFP_ATOMIC);
        if (!priv->tx_ring)
                return -ENOMEM;

        memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
        priv->tx_count = priv->tx_read = priv->tx_write = 0;
        mace->eth.tx_ring_base = priv->tx_ring_dma;
        /* Now init skb save area */
        memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
        memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
        return 0;
}

static int meth_init_rx_ring(struct meth_private *priv)
{
        int i;

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
                /* 8byte status vector + 3quad padding + 2byte padding,
                 * to put data on 64bit aligned boundary */
                skb_reserve(priv->rx_skbs[i], METH_RX_HEAD);
                priv->rx_ring[i] = (rx_packet *)(priv->rx_skbs[i]->head);
                /* I'll need to re-sync it after each RX */
                priv->rx_ring_dmas[i] =
                        dma_map_single(NULL, priv->rx_ring[i],
                                       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                mace->eth.rx_fifo = priv->rx_ring_dmas[i];
        }
        priv->rx_write = 0;
        return 0;
}

static void meth_free_tx_ring(struct meth_private *priv)
{
        int i;

        /* Remove any pending skb */
        for (i = 0; i < TX_RING_ENTRIES; i++) {
                if (priv->tx_skbs[i])
                        dev_kfree_skb(priv->tx_skbs[i]);
                priv->tx_skbs[i] = NULL;
        }
        dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
                          priv->tx_ring_dma);
}

/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
static void meth_free_rx_ring(struct meth_private *priv)
{
        int i;

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                dma_unmap_single(NULL, priv->rx_ring_dmas[i],
                                 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                priv->rx_ring[i] = 0;
                priv->rx_ring_dmas[i] = 0;
                kfree_skb(priv->rx_skbs[i]);
        }
}

int meth_reset(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        /* Reset card */
        mace->eth.mac_ctrl = SGI_MAC_RESET;
        udelay(1);
        mace->eth.mac_ctrl = 0;
        udelay(25);

        /* Load ethernet address */
        load_eaddr(dev);
        /* Should load some "errata", but later */

        /* Check for device */
        if (mdio_probe(priv) < 0) {
                DPRINTK("Unable to find PHY\n");
                return -ENODEV;
        }

        /* Initial mode: 10 | Half-duplex | Accept normal packets */
        priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
        if (dev->flags & IFF_PROMISC)
                priv->mac_ctrl |= METH_PROMISC;
        mace->eth.mac_ctrl = priv->mac_ctrl;

        /* Autonegotiate speed and duplex mode */
        meth_check_link(dev);

        /* Now set dma control, but don't enable DMA, yet */
        priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
                         (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        return 0;
}

/*============End Helper Routines=====================*/

/*
 * Open and close
 */
static int meth_open(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        int ret;

        priv->phy_addr = -1;    /* No PHY is known yet... */

        /* Initialize the hardware */
        ret = meth_reset(dev);
        if (ret < 0)
                return ret;

        /* Allocate the ring buffers */
        ret = meth_init_tx_ring(priv);
        if (ret < 0)
                return ret;
        ret = meth_init_rx_ring(priv);
        if (ret < 0)
                goto out_free_tx_ring;

        ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
        if (ret) {
                printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
                goto out_free_rx_ring;
        }

        /* Start DMA */
        priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
                          METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        DPRINTK("About to start queue\n");
        netif_start_queue(dev);

        return 0;

out_free_rx_ring:
        meth_free_rx_ring(priv);
out_free_tx_ring:
        meth_free_tx_ring(priv);

        return ret;
}

static int meth_release(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        DPRINTK("Stopping queue\n");
        netif_stop_queue(dev); /* can't transmit any more */
        /* shut down DMA */
        priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
                            METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;
        free_irq(dev->irq, dev);
        meth_free_tx_ring(priv);
        meth_free_rx_ring(priv);

        return 0;
}

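/*
 * RX ring protocol, as implemented below: the chip reports how far it
 * has written through the read pointer encoded in int_status. For each
 * consumed slot the driver either hands the skb up and replaces it with
 * a freshly allocated one, or (on error / allocation failure) recycles
 * the old skb in place; either way the slot is re-armed by remapping
 * the buffer and pushing its DMA address back into the rx_fifo.
 */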
/*
 * Receive a packet: retrieve, encapsulate and pass over to upper levels
 */
static void meth_rx(struct net_device *dev, unsigned long int_status)
{
        struct sk_buff *skb;
        unsigned long status, flags;
        struct meth_private *priv = netdev_priv(dev);
        unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;

        spin_lock_irqsave(&priv->meth_lock, flags);
        priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;
        spin_unlock_irqrestore(&priv->meth_lock, flags);

        if (int_status & METH_INT_RX_UNDERFLOW)
                fifo_rptr = (fifo_rptr - 1) & 0x0f;

        while (priv->rx_write != fifo_rptr) {
                dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
                                 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
                if (!(status & METH_RX_ST_VALID))
                        DPRINTK("Not received? status=%016lx\n", status);
#endif
                if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
                        int len = (status & 0xffff) - 4; /* omit CRC */
                        /* length sanity check */
                        if (len < 60 || len > 1518) {
                                printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n",
                                       dev->name, priv->rx_write,
                                       priv->rx_ring[priv->rx_write]->status.raw);
                                dev->stats.rx_errors++;
                                dev->stats.rx_length_errors++;
                                skb = priv->rx_skbs[priv->rx_write];
                        } else {
                                skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
                                if (!skb) {
                                        /* Ouch! No memory! Drop packet on the floor */
                                        DPRINTK("No mem: dropping packet\n");
                                        dev->stats.rx_dropped++;
                                        skb = priv->rx_skbs[priv->rx_write];
                                } else {
                                        struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
                                        /* 8byte status vector + 3quad padding + 2byte padding,
                                         * to put data on 64bit aligned boundary */
                                        skb_reserve(skb, METH_RX_HEAD);
                                        /* Write metadata, and then pass to the receive level */
                                        skb_put(skb_c, len);
                                        priv->rx_skbs[priv->rx_write] = skb;
                                        skb_c->protocol = eth_type_trans(skb_c, dev);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                        netif_rx(skb_c);
                                }
                        }
                } else {
                        dev->stats.rx_errors++;
                        skb = priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG > 0
                        printk(KERN_WARNING "meth: RX error: status=0x%016lx\n", status);
                        if (status & METH_RX_ST_RCV_CODE_VIOLATION)
                                printk(KERN_WARNING "Receive Code Violation\n");
                        if (status & METH_RX_ST_CRC_ERR)
                                printk(KERN_WARNING "CRC error\n");
                        if (status & METH_RX_ST_INV_PREAMBLE_CTX)
                                printk(KERN_WARNING "Invalid Preamble Context\n");
                        if (status & METH_RX_ST_LONG_EVT_SEEN)
                                printk(KERN_WARNING "Long Event Seen...\n");
                        if (status & METH_RX_ST_BAD_PACKET)
                                printk(KERN_WARNING "Bad Packet\n");
                        if (status & METH_RX_ST_CARRIER_EVT_SEEN)
                                printk(KERN_WARNING "Carrier Event Seen\n");
#endif
                }
                priv->rx_ring[priv->rx_write] = (rx_packet *)skb->head;
                priv->rx_ring[priv->rx_write]->status.raw = 0;
                priv->rx_ring_dmas[priv->rx_write] =
                        dma_map_single(NULL, priv->rx_ring[priv->rx_write],
                                       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
                ADVANCE_RX_PTR(priv->rx_write);
        }
        spin_lock_irqsave(&priv->meth_lock, flags);
        /* In case there was underflow, and Rx DMA was disabled */
        priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;
        mace->eth.int_stat = METH_INT_RX_THRESHOLD;
        spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static int meth_tx_full(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        return priv->tx_count >= TX_RING_ENTRIES - 1;
}

static void meth_tx_cleanup(struct net_device *dev, unsigned long int_status)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long status, flags;
        struct sk_buff *skb;
        unsigned long rptr = (int_status & TX_INFO_RPTR) >> 16;

        spin_lock_irqsave(&priv->meth_lock, flags);

        /* Stop DMA notification */
        priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        while (priv->tx_read != rptr) {
                skb = priv->tx_skbs[priv->tx_read];
                status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG >= 1
                if (priv->tx_read == priv->tx_write)
                        DPRINTK("Auchi! tx_read=%lu, tx_write=%lu, rptr=%lu?\n",
                                priv->tx_read, priv->tx_write, rptr);
#endif
                if (status & METH_TX_ST_DONE) {
                        if (status & METH_TX_ST_SUCCESS) {
                                dev->stats.tx_packets++;
                                dev->stats.tx_bytes += skb->len;
                        } else {
                                dev->stats.tx_errors++;
#if MFE_DEBUG >= 1
                                DPRINTK("TX error: status=%016lx <", status);
                                if (status & METH_TX_ST_SUCCESS)
                                        printk(" SUCCESS");
                                if (status & METH_TX_ST_TOOLONG)
                                        printk(" TOOLONG");
                                if (status & METH_TX_ST_UNDERRUN)
                                        printk(" UNDERRUN");
                                if (status & METH_TX_ST_EXCCOLL)
                                        printk(" EXCCOLL");
                                if (status & METH_TX_ST_DEFER)
                                        printk(" DEFER");
                                if (status & METH_TX_ST_LATECOLL)
                                        printk(" LATECOLL");
                                printk(" >\n");
#endif
                        }
                } else {
                        DPRINTK("RPTR points us here, but packet not done?\n");
                        break;
                }
                dev_kfree_skb_irq(skb);
                priv->tx_skbs[priv->tx_read] = NULL;
                priv->tx_ring[priv->tx_read].header.raw = 0;
                priv->tx_read = (priv->tx_read + 1) & (TX_RING_ENTRIES - 1);
                priv->tx_count--;
        }

        /* wake up queue if it was stopped */
        if (netif_queue_stopped(dev) && !meth_tx_full(dev))
                netif_wake_queue(dev);

        mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
        spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static void meth_error(struct net_device *dev, unsigned status)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        printk(KERN_WARNING "meth: error status: 0x%08x\n", status);
        /* check for errors too... */
        if (status & (METH_INT_TX_LINK_FAIL))
                printk(KERN_WARNING "meth: link failure\n");
        /* Should I do full reset in this case? */
        if (status & (METH_INT_MEM_ERROR))
                printk(KERN_WARNING "meth: memory error\n");
        if (status & (METH_INT_TX_ABORT))
                printk(KERN_WARNING "meth: aborted\n");
        if (status & (METH_INT_RX_OVERFLOW))
                printk(KERN_WARNING "meth: Rx overflow\n");
        if (status & (METH_INT_RX_UNDERFLOW)) {
                printk(KERN_WARNING "meth: Rx underflow\n");
                spin_lock_irqsave(&priv->meth_lock, flags);
                mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
                /* more underflow interrupts will be delivered,
                 * effectively throwing us into an infinite loop.
                 * Thus I stop processing Rx in this case. */
                priv->dma_ctrl &= ~METH_DMA_RX_EN;
                mace->eth.dma_ctrl = priv->dma_ctrl;
                DPRINTK("Disabled meth Rx DMA temporarily\n");
                spin_unlock_irqrestore(&priv->meth_lock, flags);
        }
        mace->eth.int_stat = METH_INT_ERROR;
}

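/*
 * Interrupt acknowledgement convention in this driver: writing a status
 * bit back into mace->eth.int_stat (as meth_rx(), meth_tx_cleanup() and
 * meth_error() do) appears to clear that condition in hardware, so the
 * loop below keeps re-reading int_stat until no source bits remain.
 */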
/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct meth_private *priv = netdev_priv(dev);
        unsigned long status;

        status = mace->eth.int_stat;
        while (status & 0xff) {
                /* First handle errors - if we get Rx underflow,
                 * Rx DMA will be disabled, and the Rx handler will reenable
                 * it. I don't think it's possible to get Rx underflow
                 * without getting an Rx interrupt */
                if (status & METH_INT_ERROR) {
                        meth_error(dev, status);
                }
                if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
                        /* a transmission is over: free the skb */
                        meth_tx_cleanup(dev, status);
                }
                if (status & METH_INT_RX_THRESHOLD) {
                        if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
                                break;
                        /* send it to meth_rx for handling */
                        meth_rx(dev, status);
                }
                status = mace->eth.int_stat;
        }

        return IRQ_HANDLED;
}

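/*
 * TX descriptor layout, as used by the three prepare routines below:
 * each tx_packet is 128 bytes; the first 8 bytes are the command/status
 * header and the remaining 120 bytes either hold inline packet data or
 * up to two "catbuf" DMA pointers. Inline data is right-justified so
 * that it ends at byte 127, which is why the header stores the offset
 * (128 - len) << 16 and the copies below target data.dt + (120 - len).
 */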
/*
 * Transmits packets that fit into TX descriptor (are <=120B)
 */
static void meth_tx_short_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

        desc->header.raw = METH_TX_CMD_INT_EN | (len - 1) | ((128 - len) << 16);
        /* maybe I should set whole thing to 0 first... */
        skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
        if (skb->len < len)
                memset(desc->data.dt + 120 - len + skb->len, 0, len - skb->len);
}

#define TX_CATBUF1 BIT(25)
static void meth_tx_1page_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
        int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
        int buffer_len = skb->len - unaligned_len;
        dma_addr_t catbuf;

        desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);

        /* unaligned part */
        if (unaligned_len) {
                skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
                                          unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }

        /* first page */
        catbuf = dma_map_single(NULL, buffer_data, buffer_len,
                                DMA_TO_DEVICE);
        desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
        desc->data.cat_buf[0].form.len = buffer_len - 1;
}
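
/*
 * Note on the cat_buf encoding above: start_addr is stored as the DMA
 * address shifted right by 3, so concatenated buffers must start on an
 * 8-byte boundary (hence the rounding of skb->data), and the len field
 * holds length - 1.
 */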

#define TX_CATBUF2 BIT(26)
static void meth_tx_2page_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
        void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
        int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
        int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
        int buffer2_len = skb->len - buffer1_len - unaligned_len;
        dma_addr_t catbuf1, catbuf2;

        desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
        /* unaligned part */
        if (unaligned_len) {
                skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
                                          unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }

        /* first page */
        catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
                                 DMA_TO_DEVICE);
        desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
        desc->data.cat_buf[0].form.len = buffer1_len - 1;
        /* second page */
        catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
                                 DMA_TO_DEVICE);
        desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
        desc->data.cat_buf[1].form.len = buffer2_len - 1;
}

static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
        /* Remember the skb, so we can free it at interrupt time */
        priv->tx_skbs[priv->tx_write] = skb;
        if (skb->len <= 120) {
                /* Whole packet fits into descriptor */
                meth_tx_short_prepare(priv, skb);
        } else if (PAGE_ALIGN((unsigned long)skb->data) !=
                   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
                /* Packet crosses page boundary */
                meth_tx_2page_prepare(priv, skb);
        } else {
                /* Packet is in one page */
                meth_tx_1page_prepare(priv, skb);
        }
        priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
        mace->eth.tx_info = priv->tx_write;
        priv->tx_count++;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int meth_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&priv->meth_lock, flags);
        /* Stop DMA notification */
        priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        meth_add_to_tx_ring(priv, skb);
        dev->trans_start = jiffies; /* save the timestamp */

        /* If TX ring is full, tell the upper layer to stop sending packets */
        if (meth_tx_full(dev)) {
                printk(KERN_DEBUG "TX full: stopping\n");
                netif_stop_queue(dev);
        }

        /* Restart DMA notification */
        priv->dma_ctrl |= METH_DMA_TX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        spin_unlock_irqrestore(&priv->meth_lock, flags);

        return NETDEV_TX_OK;
}

/*
 * Deal with a transmit timeout.
 */
static void meth_tx_timeout(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

        /* Protect against concurrent rx interrupts */
        spin_lock_irqsave(&priv->meth_lock, flags);

        /* Try to reset the interface. */
        meth_reset(dev);

        dev->stats.tx_errors++;

        /* Clear all rings */
        meth_free_tx_ring(priv);
        meth_free_rx_ring(priv);
        meth_init_tx_ring(priv);
        meth_init_rx_ring(priv);

        /* Restart dma */
        priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        /* Enable interrupt */
        spin_unlock_irqrestore(&priv->meth_lock, flags);

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        /* XXX Not yet implemented */
        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
        default:
                return -EOPNOTSUPP;
        }
}

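/*
 * Multicast hashing below: ether_crc() yields a 32-bit CRC of the
 * 6-byte address, and its top six bits (crc >> 26) select one of the
 * 64 bit positions in mcast_filter. For example, an address whose CRC
 * is 0xf4000000 would set bit 61.
 */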
static void meth_set_rx_mode(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        spin_lock_irqsave(&priv->meth_lock, flags);
        priv->mac_ctrl &= ~METH_PROMISC;

        if (dev->flags & IFF_PROMISC) {
                priv->mac_ctrl |= METH_PROMISC;
                priv->mcast_filter = 0xffffffffffffffffUL;
        } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
                   (dev->flags & IFF_ALLMULTI)) {
                priv->mac_ctrl |= METH_ACCEPT_AMCAST;
                priv->mcast_filter = 0xffffffffffffffffUL;
        } else {
                struct netdev_hw_addr *ha;
                priv->mac_ctrl |= METH_ACCEPT_MCAST;

                netdev_for_each_mc_addr(ha, dev)
                        set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
                                (volatile unsigned long *)&priv->mcast_filter);
        }

        /* Write the changes to the chip registers. */
        mace->eth.mac_ctrl = priv->mac_ctrl;
        mace->eth.mcast_filter = priv->mcast_filter;

        /* Done! */
        spin_unlock_irqrestore(&priv->meth_lock, flags);
        netif_wake_queue(dev);
}

static const struct net_device_ops meth_netdev_ops = {
        .ndo_open            = meth_open,
        .ndo_stop            = meth_release,
        .ndo_start_xmit      = meth_tx,
        .ndo_do_ioctl        = meth_ioctl,
        .ndo_tx_timeout      = meth_tx_timeout,
        .ndo_change_mtu      = eth_change_mtu,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_set_rx_mode     = meth_set_rx_mode,
};

/*
 * The init function.
 */
static int __devinit meth_probe(struct platform_device *pdev)
{
        struct net_device *dev;
        struct meth_private *priv;
        int err;

        dev = alloc_etherdev(sizeof(struct meth_private));
        if (!dev)
                return -ENOMEM;

        dev->netdev_ops     = &meth_netdev_ops;
        dev->watchdog_timeo = timeout;
        dev->irq            = MACE_ETHERNET_IRQ;
        dev->base_addr      = (unsigned long)&mace->eth;
        memcpy(dev->dev_addr, o2meth_eaddr, 6);

        priv = netdev_priv(dev);
        spin_lock_init(&priv->meth_lock);
        SET_NETDEV_DEV(dev, &pdev->dev);

        err = register_netdev(dev);
        if (err) {
                free_netdev(dev);
                return err;
        }

        printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
               dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
        return 0;
}

static int __exit meth_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        unregister_netdev(dev);
        free_netdev(dev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver meth_driver = {
        .probe  = meth_probe,
        .remove = __exit_p(meth_remove),
        .driver = {
                .name  = "meth",
                .owner = THIS_MODULE,
        }
};

module_platform_driver(meth_driver);

MODULE_AUTHOR("Ilya Volynets <[email protected]>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:meth");