w90p910_ether.c (Linux Kernel 3.7.1)

/*
 * Copyright (c) 2008-2009 Nuvoton technology corporation.
 *
 * Wan ZongShun <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>

#define DRV_MODULE_NAME		"w90p910-emc"
#define DRV_MODULE_VERSION	"0.1"

/* Ethernet MAC Registers */
#define REG_CAMCMR	0x00
#define REG_CAMEN	0x04
#define REG_CAMM_BASE	0x08
#define REG_CAML_BASE	0x0c
#define REG_TXDLSA	0x88
#define REG_RXDLSA	0x8C
#define REG_MCMDR	0x90
#define REG_MIID	0x94
#define REG_MIIDA	0x98
#define REG_FFTCR	0x9C
#define REG_TSDR	0xa0
#define REG_RSDR	0xa4
#define REG_DMARFC	0xa8
#define REG_MIEN	0xac
#define REG_MISTA	0xb0
#define REG_CTXDSA	0xcc
#define REG_CTXBSA	0xd0
#define REG_CRXDSA	0xd4
#define REG_CRXBSA	0xd8

/* mac controller bit */
#define MCMDR_RXON	0x01
#define MCMDR_ACP	(0x01 << 3)
#define MCMDR_SPCRC	(0x01 << 5)
#define MCMDR_TXON	(0x01 << 8)
#define MCMDR_FDUP	(0x01 << 18)
#define MCMDR_ENMDC	(0x01 << 19)
#define MCMDR_OPMOD	(0x01 << 20)
#define SWR		(0x01 << 24)

/* cam command register */
#define CAMCMR_AUP	0x01
#define CAMCMR_AMP	(0x01 << 1)
#define CAMCMR_ABP	(0x01 << 2)
#define CAMCMR_CCAM	(0x01 << 3)
#define CAMCMR_ECMP	(0x01 << 4)
#define CAM0EN		0x01

/* mac mii controller bit */
#define MDCCR		(0x0a << 20)
#define PHYAD		(0x01 << 8)
#define PHYWR		(0x01 << 16)
#define PHYBUSY		(0x01 << 17)
#define PHYPRESP	(0x01 << 18)
#define CAM_ENTRY_SIZE	0x08

/* rx and tx status */
#define TXDS_TXCP	(0x01 << 19)
#define RXDS_CRCE	(0x01 << 17)
#define RXDS_PTLE	(0x01 << 19)
#define RXDS_RXGD	(0x01 << 20)
#define RXDS_ALIE	(0x01 << 21)
#define RXDS_RP		(0x01 << 22)

/* mac interrupt status */
#define MISTA_EXDEF	(0x01 << 19)
#define MISTA_TXBERR	(0x01 << 24)
#define MISTA_TDU	(0x01 << 23)
#define MISTA_RDU	(0x01 << 10)
#define MISTA_RXBERR	(0x01 << 11)

#define ENSTART		0x01
#define ENRXINTR	0x01
#define ENRXGD		(0x01 << 4)
#define ENRXBERR	(0x01 << 11)
#define ENTXINTR	(0x01 << 16)
#define ENTXCP		(0x01 << 18)
#define ENTXABT		(0x01 << 21)
#define ENTXBERR	(0x01 << 24)
#define ENMDC		(0x01 << 19)
#define PHYBUSY		(0x01 << 17)
#define MDCCR_VAL	0xa00000

/* rx and tx owner bit */
#define RX_OWEN_DMA	(0x01 << 31)
#define RX_OWEN_CPU	(~(0x03 << 30))
#define TX_OWEN_DMA	(0x01 << 31)
#define TX_OWEN_CPU	(~(0x01 << 31))

/* tx frame desc controller bit */
#define MACTXINTEN	0x04
#define CRCMODE		0x02
#define PADDINGMODE	0x01

/* fftcr controller bit */
#define TXTHD		(0x03 << 8)
#define BLENGTH		(0x01 << 20)

/* global setting for driver */
#define RX_DESC_SIZE	50
#define TX_DESC_SIZE	10
#define MAX_RBUFF_SZ	0x600
#define MAX_TBUFF_SZ	0x600
#define TX_TIMEOUT	(HZ/2)
#define DELAY		1000
#define CAM0		0x0

static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg);

struct w90p910_rxbd {
	unsigned int sl;
	unsigned int buffer;
	unsigned int reserved;
	unsigned int next;
};

struct w90p910_txbd {
	unsigned int mode;
	unsigned int buffer;
	unsigned int sl;
	unsigned int next;
};

struct recv_pdesc {
	struct w90p910_rxbd desclist[RX_DESC_SIZE];
	char recv_buf[RX_DESC_SIZE][MAX_RBUFF_SZ];
};

struct tran_pdesc {
	struct w90p910_txbd desclist[TX_DESC_SIZE];
	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
};

struct w90p910_ether {
	struct recv_pdesc *rdesc;
	struct tran_pdesc *tdesc;
	dma_addr_t rdesc_phys;
	dma_addr_t tdesc_phys;
	struct net_device_stats stats;
	struct platform_device *pdev;
	struct resource *res;
	struct sk_buff *skb;
	struct clk *clk;
	struct clk *rmiiclk;
	struct mii_if_info mii;
	struct timer_list check_timer;
	void __iomem *reg;
	int rxirq;
	int txirq;
	unsigned int cur_tx;
	unsigned int cur_rx;
	unsigned int finish_tx;
	unsigned int rx_packets;
	unsigned int rx_bytes;
	unsigned int start_tx_ptr;
	unsigned int start_rx_ptr;
	unsigned int linkflag;
};

static void update_linkspeed_register(struct net_device *dev,
				unsigned int speed, unsigned int duplex)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (speed == SPEED_100) {
		/* 100 full/half duplex */
		if (duplex == DUPLEX_FULL) {
			val |= (MCMDR_OPMOD | MCMDR_FDUP);
		} else {
			val |= MCMDR_OPMOD;
			val &= ~MCMDR_FDUP;
		}
	} else {
		/* 10 full/half duplex */
		if (duplex == DUPLEX_FULL) {
			val |= MCMDR_FDUP;
			val &= ~MCMDR_OPMOD;
		} else {
			val &= ~(MCMDR_FDUP | MCMDR_OPMOD);
		}
	}

	__raw_writel(val, ether->reg + REG_MCMDR);
}

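/*
 * Poll the PHY through the MII interface and mirror the negotiated
 * speed/duplex into the MAC.  With auto-negotiation enabled the result
 * is taken from the link partner ability register (MII_LPA), otherwise
 * from the forced bits in BMCR.  linkflag caches the state so MCMDR is
 * only rewritten on a link transition.
 */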
static void update_linkspeed(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int bmsr, bmcr, lpa, speed, duplex;

	pdev = ether->pdev;

	if (!mii_link_ok(&ether->mii)) {
		ether->linkflag = 0x0;
		netif_carrier_off(dev);
		dev_warn(&pdev->dev, "%s: Link down.\n", dev->name);
		return;
	}

	if (ether->linkflag == 1)
		return;

	bmsr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMSR);
	bmcr = w90p910_mdio_read(dev, ether->mii.phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE) {
		if (!(bmsr & BMSR_ANEGCOMPLETE))
			return;

		lpa = w90p910_mdio_read(dev, ether->mii.phy_id, MII_LPA);

		if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF))
			speed = SPEED_100;
		else
			speed = SPEED_10;

		if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL))
			duplex = DUPLEX_FULL;
		else
			duplex = DUPLEX_HALF;

	} else {
		speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
	}

	update_linkspeed_register(dev, speed, duplex);

	dev_info(&pdev->dev, "%s: Link now %i-%s\n", dev->name, speed,
			(duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
	ether->linkflag = 0x01;

	netif_carrier_on(dev);
}

static void w90p910_check_link(unsigned long dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct w90p910_ether *ether = netdev_priv(dev);

	update_linkspeed(dev);
	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
}

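/*
 * Load one CAM (MAC address filter) entry: the first four bytes of the
 * address go into the CAMM (most significant word) register and the
 * remaining two bytes into the upper half of CAML, with consecutive
 * entries spaced CAM_ENTRY_SIZE bytes apart.
 */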
static void w90p910_write_cam(struct net_device *dev,
				unsigned int x, unsigned char *pval)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int msw, lsw;

	msw = (pval[0] << 24) | (pval[1] << 16) | (pval[2] << 8) | pval[3];

	lsw = (pval[4] << 24) | (pval[5] << 16);

	__raw_writel(lsw, ether->reg + REG_CAML_BASE + x * CAM_ENTRY_SIZE);
	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
}

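/*
 * Allocate the TX and RX descriptor rings (descriptors plus their
 * packet buffers) from DMA-coherent memory and link each descriptor's
 * "next" pointer to the physical address of its successor, the last
 * entry pointing back to the first so the controller sees a circular
 * list.  RX descriptors start out owned by the DMA engine.
 */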
static int w90p910_init_desc(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *tdesc;
	struct w90p910_rxbd *rdesc;
	struct platform_device *pdev;
	unsigned int i;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	ether->tdesc = (struct tran_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					&ether->tdesc_phys, GFP_KERNEL);

	if (!ether->tdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
		return -ENOMEM;
	}

	ether->rdesc = (struct recv_pdesc *)
		dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
					&ether->rdesc_phys, GFP_KERNEL);

	if (!ether->rdesc) {
		dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					ether->tdesc, ether->tdesc_phys);
		return -ENOMEM;
	}

	for (i = 0; i < TX_DESC_SIZE; i++) {
		unsigned int offset;

		tdesc = &(ether->tdesc->desclist[i]);

		if (i == TX_DESC_SIZE - 1)
			offset = offsetof(struct tran_pdesc, desclist[0]);
		else
			offset = offsetof(struct tran_pdesc, desclist[i + 1]);

		tdesc->next = ether->tdesc_phys + offset;
		tdesc->buffer = ether->tdesc_phys +
			offsetof(struct tran_pdesc, tran_buf[i]);
		tdesc->sl = 0;
		tdesc->mode = 0;
	}

	ether->start_tx_ptr = ether->tdesc_phys;

	for (i = 0; i < RX_DESC_SIZE; i++) {
		unsigned int offset;

		rdesc = &(ether->rdesc->desclist[i]);

		if (i == RX_DESC_SIZE - 1)
			offset = offsetof(struct recv_pdesc, desclist[0]);
		else
			offset = offsetof(struct recv_pdesc, desclist[i + 1]);

		rdesc->next = ether->rdesc_phys + offset;
		rdesc->sl = RX_OWEN_DMA;
		rdesc->buffer = ether->rdesc_phys +
			offsetof(struct recv_pdesc, recv_buf[i]);
	}

	ether->start_rx_ptr = ether->rdesc_phys;

	return 0;
}

static void w90p910_set_fifo_threshold(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = TXTHD | BLENGTH;
	__raw_writel(val, ether->reg + REG_FFTCR);
}

static void w90p910_return_default_idle(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);
	val |= SWR;
	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_trigger_rx(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ENSTART, ether->reg + REG_RSDR);
}

static void w90p910_trigger_tx(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ENSTART, ether->reg + REG_TSDR);
}

static void w90p910_enable_mac_interrupt(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = ENTXINTR | ENRXINTR | ENRXGD | ENTXCP;
	val |= ENTXBERR | ENRXBERR | ENTXABT;

	__raw_writel(val, ether->reg + REG_MIEN);
}

static void w90p910_get_and_clear_int(struct net_device *dev,
							unsigned int *val)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	*val = __raw_readl(ether->reg + REG_MISTA);
	__raw_writel(*val, ether->reg + REG_MISTA);
}

static void w90p910_set_global_maccmd(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);
	val |= MCMDR_SPCRC | MCMDR_ENMDC | MCMDR_ACP | ENMDC;
	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_enable_cam(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	val = __raw_readl(ether->reg + REG_CAMEN);
	val |= CAM0EN;
	__raw_writel(val, ether->reg + REG_CAMEN);
}

static void w90p910_enable_cam_command(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = CAMCMR_ECMP | CAMCMR_ABP | CAMCMR_AUP;
	__raw_writel(val, ether->reg + REG_CAMCMR);
}

static void w90p910_enable_tx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (enable)
		val |= MCMDR_TXON;
	else
		val &= ~MCMDR_TXON;

	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_enable_rx(struct net_device *dev, unsigned int enable)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	unsigned int val;

	val = __raw_readl(ether->reg + REG_MCMDR);

	if (enable)
		val |= MCMDR_RXON;
	else
		val &= ~MCMDR_RXON;

	__raw_writel(val, ether->reg + REG_MCMDR);
}

static void w90p910_set_curdest(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	__raw_writel(ether->start_rx_ptr, ether->reg + REG_RXDLSA);
	__raw_writel(ether->start_tx_ptr, ether->reg + REG_TXDLSA);
}

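/*
 * Bring the controller back to a known state: stop TX/RX, issue a
 * software reset, rebuild the descriptor rings and re-program the CAM,
 * interrupt mask and descriptor base addresses before enabling TX/RX
 * again.  Used at open time and after bus-error interrupts.
 */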
static void w90p910_reset_mac(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	w90p910_enable_tx(dev, 0);
	w90p910_enable_rx(dev, 0);
	w90p910_set_fifo_threshold(dev);
	w90p910_return_default_idle(dev);

	if (!netif_queue_stopped(dev))
		netif_stop_queue(dev);

	w90p910_init_desc(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */
	ether->cur_tx = 0x0;
	ether->finish_tx = 0x0;
	ether->cur_rx = 0x0;

	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_enable_tx(dev, 1);
	w90p910_enable_rx(dev, 1);
	w90p910_trigger_tx(dev);
	w90p910_trigger_rx(dev);

	dev->trans_start = jiffies; /* prevent tx timeout */

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}

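/*
 * MDIO access goes through the MIID/MIIDA register pair: the PHY
 * address and register number are written to MIIDA together with the
 * PHYBUSY bit (plus PHYWR for writes), then the busy bit is polled for
 * up to DELAY iterations until the controller completes the cycle.
 */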
static void w90p910_mdio_write(struct net_device *dev,
					int phy_id, int reg, int data)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i;

	pdev = ether->pdev;

	__raw_writel(data, ether->reg + REG_MIID);

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | PHYWR | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	if (i == DELAY)
		dev_warn(&pdev->dev, "mdio write timed out\n");
}

static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	unsigned int val, i, data;

	pdev = ether->pdev;

	val = (phy_id << 0x08) | reg;
	val |= PHYBUSY | MDCCR_VAL;
	__raw_writel(val, ether->reg + REG_MIIDA);

	for (i = 0; i < DELAY; i++) {
		if ((__raw_readl(ether->reg + REG_MIIDA) & PHYBUSY) == 0)
			break;
	}

	if (i == DELAY) {
		dev_warn(&pdev->dev, "mdio read timed out\n");
		data = 0xffff;
	} else {
		data = __raw_readl(ether->reg + REG_MIID);
	}

	return data;
}

static int w90p910_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *address = addr;

	if (!is_valid_ether_addr(address->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
	w90p910_write_cam(dev, CAM0, dev->dev_addr);

	return 0;
}

static int w90p910_ether_close(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;

	pdev = ether->pdev;

	dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
				ether->rdesc, ether->rdesc_phys);
	dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
				ether->tdesc, ether->tdesc_phys);

	netif_stop_queue(dev);

	del_timer_sync(&ether->check_timer);
	clk_disable(ether->rmiiclk);
	clk_disable(ether->clk);

	free_irq(ether->txirq, dev);
	free_irq(ether->rxirq, dev);

	return 0;
}

static struct net_device_stats *w90p910_ether_stats(struct net_device *dev)
{
	struct w90p910_ether *ether;

	ether = netdev_priv(dev);

	return &ether->stats;
}

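/*
 * Copy one outgoing frame into the coherent TX buffer that belongs to
 * the current descriptor, hand the descriptor to the DMA engine and
 * kick the transmitter.  If the next descriptor is still owned by the
 * DMA engine the queue is stopped until the TX interrupt reclaims it.
 */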
static int w90p910_send_frame(struct net_device *dev,
					unsigned char *data, int length)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	unsigned char *buffer;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	txbd = &ether->tdesc->desclist[ether->cur_tx];
	buffer = ether->tdesc->tran_buf[ether->cur_tx];

	if (length > 1514) {
		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
		length = 1514;
	}

	txbd->sl = length & 0xFFFF;

	memcpy(buffer, data, length);

	txbd->mode = TX_OWEN_DMA | PADDINGMODE | CRCMODE | MACTXINTEN;

	w90p910_enable_tx(dev, 1);

	w90p910_trigger_tx(dev);

	if (++ether->cur_tx >= TX_DESC_SIZE)
		ether->cur_tx = 0;

	txbd = &ether->tdesc->desclist[ether->cur_tx];

	if (txbd->mode & TX_OWEN_DMA)
		netif_stop_queue(dev);

	return 0;
}

static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	if (!(w90p910_send_frame(dev, skb->data, skb->len))) {
		ether->skb = skb;
		dev_kfree_skb_irq(skb);
		return 0;
	}
	return -EAGAIN;
}

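/*
 * TX completion interrupt: walk the ring from finish_tx up to the
 * descriptor the controller is currently working on (REG_CTXDSA),
 * update the statistics for each completed frame and release the
 * descriptors.  A TX bus error triggers a full MAC reset.
 */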
static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
{
	struct w90p910_ether *ether;
	struct w90p910_txbd *txbd;
	struct platform_device *pdev;
	struct net_device *dev;
	unsigned int cur_entry, entry, status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_get_and_clear_int(dev, &status);

	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);

	entry = ether->tdesc_phys +
		offsetof(struct tran_pdesc, desclist[ether->finish_tx]);

	while (entry != cur_entry) {
		txbd = &ether->tdesc->desclist[ether->finish_tx];

		if (++ether->finish_tx >= TX_DESC_SIZE)
			ether->finish_tx = 0;

		if (txbd->sl & TXDS_TXCP) {
			ether->stats.tx_packets++;
			ether->stats.tx_bytes += txbd->sl & 0xFFFF;
		} else {
			ether->stats.tx_errors++;
		}

		txbd->sl = 0x0;
		txbd->mode = 0x0;

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		entry = ether->tdesc_phys +
			offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
	}

	if (status & MISTA_EXDEF) {
		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
	} else if (status & MISTA_TXBERR) {
		dev_err(&pdev->dev, "emc bus error interrupt\n");
		w90p910_reset_mac(dev);
	} else if (status & MISTA_TDU) {
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}

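/*
 * Drain the RX ring: process descriptors from cur_rx until the entry
 * the controller is currently filling (REG_CRXDSA) is reached.  Good
 * frames are copied into a freshly allocated skb and passed up with
 * netif_rx(); error frames only update the statistics.  Each processed
 * descriptor is handed back to the DMA engine.
 */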
static void netdev_rx(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct w90p910_rxbd *rxbd;
	struct platform_device *pdev;
	struct sk_buff *skb;
	unsigned char *data;
	unsigned int length, status, val, entry;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	rxbd = &ether->rdesc->desclist[ether->cur_rx];

	do {
		val = __raw_readl(ether->reg + REG_CRXDSA);

		entry = ether->rdesc_phys +
			offsetof(struct recv_pdesc, desclist[ether->cur_rx]);

		if (val == entry)
			break;

		status = rxbd->sl;
		length = status & 0xFFFF;

		if (status & RXDS_RXGD) {
			data = ether->rdesc->recv_buf[ether->cur_rx];
			skb = netdev_alloc_skb(dev, length + 2);
			if (!skb) {
				dev_err(&pdev->dev, "get skb buffer error\n");
				ether->stats.rx_dropped++;
				return;
			}

			skb_reserve(skb, 2);
			skb_put(skb, length);
			skb_copy_to_linear_data(skb, data, length);
			skb->protocol = eth_type_trans(skb, dev);
			ether->stats.rx_packets++;
			ether->stats.rx_bytes += length;
			netif_rx(skb);
		} else {
			ether->stats.rx_errors++;

			if (status & RXDS_RP) {
				dev_err(&pdev->dev, "rx runt err\n");
				ether->stats.rx_length_errors++;
			} else if (status & RXDS_CRCE) {
				dev_err(&pdev->dev, "rx crc err\n");
				ether->stats.rx_crc_errors++;
			} else if (status & RXDS_ALIE) {
				dev_err(&pdev->dev, "rx alignment err\n");
				ether->stats.rx_frame_errors++;
			} else if (status & RXDS_PTLE) {
				dev_err(&pdev->dev, "rx longer err\n");
				ether->stats.rx_over_errors++;
			}
		}

		rxbd->sl = RX_OWEN_DMA;
		rxbd->reserved = 0x0;

		if (++ether->cur_rx >= RX_DESC_SIZE)
			ether->cur_rx = 0;

		rxbd = &ether->rdesc->desclist[ether->cur_rx];

	} while (1);
}

static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
{
	struct net_device *dev;
	struct w90p910_ether *ether;
	struct platform_device *pdev;
	unsigned int status;

	dev = dev_id;
	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_get_and_clear_int(dev, &status);

	if (status & MISTA_RDU) {
		netdev_rx(dev);
		w90p910_trigger_rx(dev);

		return IRQ_HANDLED;
	} else if (status & MISTA_RXBERR) {
		dev_err(&pdev->dev, "emc rx bus error\n");
		w90p910_reset_mac(dev);
	}

	netdev_rx(dev);
	return IRQ_HANDLED;
}

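/*
 * Open: reset and reconfigure the MAC, enable the clocks, install the
 * TX and RX interrupt handlers, start the link-check timer and the
 * transmit queue, and finally kick the receiver.
 */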
static int w90p910_ether_open(struct net_device *dev)
{
	struct w90p910_ether *ether;
	struct platform_device *pdev;

	ether = netdev_priv(dev);
	pdev = ether->pdev;

	w90p910_reset_mac(dev);
	w90p910_set_fifo_threshold(dev);
	w90p910_set_curdest(dev);
	w90p910_enable_cam(dev);
	w90p910_enable_cam_command(dev);
	w90p910_enable_mac_interrupt(dev);
	w90p910_set_global_maccmd(dev);
	w90p910_enable_rx(dev, 1);

	clk_enable(ether->rmiiclk);
	clk_enable(ether->clk);

	ether->rx_packets = 0x0;
	ether->rx_bytes = 0x0;

	if (request_irq(ether->txirq, w90p910_tx_interrupt,
						0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq tx failed\n");
		return -EAGAIN;
	}

	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
						0x0, pdev->name, dev)) {
		dev_err(&pdev->dev, "register irq rx failed\n");
		free_irq(ether->txirq, dev);
		return -EAGAIN;
	}

	mod_timer(&ether->check_timer, jiffies + msecs_to_jiffies(1000));
	netif_start_queue(dev);
	w90p910_trigger_rx(dev);

	dev_info(&pdev->dev, "%s is OPENED\n", dev->name);

	return 0;
}

static void w90p910_ether_set_multicast_list(struct net_device *dev)
{
	struct w90p910_ether *ether;
	unsigned int rx_mode;

	ether = netdev_priv(dev);

	if (dev->flags & IFF_PROMISC)
		rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
	else
		rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
	__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
}

static int w90p910_ether_ioctl(struct net_device *dev,
						struct ifreq *ifr, int cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	return generic_mii_ioctl(&ether->mii, data, cmd, NULL);
}

static void w90p910_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_ethtool_gset(&ether->mii, cmd);
}

static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_ethtool_sset(&ether->mii, cmd);
}

static int w90p910_nway_reset(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_nway_restart(&ether->mii);
}

static u32 w90p910_get_link(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	return mii_link_ok(&ether->mii);
}

static const struct ethtool_ops w90p910_ether_ethtool_ops = {
	.get_settings	= w90p910_get_settings,
	.set_settings	= w90p910_set_settings,
	.get_drvinfo	= w90p910_get_drvinfo,
	.nway_reset	= w90p910_nway_reset,
	.get_link	= w90p910_get_link,
};

static const struct net_device_ops w90p910_ether_netdev_ops = {
	.ndo_open		= w90p910_ether_open,
	.ndo_stop		= w90p910_ether_close,
	.ndo_start_xmit		= w90p910_ether_start_xmit,
	.ndo_get_stats		= w90p910_ether_stats,
	.ndo_set_rx_mode	= w90p910_ether_set_multicast_list,
	.ndo_set_mac_address	= w90p910_set_mac_address,
	.ndo_do_ioctl		= w90p910_ether_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};

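/*
 * No EEPROM or platform data is consulted here; the driver simply
 * programs a fixed, hard-coded station address (00:02:ac:55:88:a8).
 */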
static void __init get_mac_address(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);
	struct platform_device *pdev;
	char addr[6];

	pdev = ether->pdev;

	addr[0] = 0x00;
	addr[1] = 0x02;
	addr[2] = 0xac;
	addr[3] = 0x55;
	addr[4] = 0x88;
	addr[5] = 0xa8;

	if (is_valid_ether_addr(addr))
		memcpy(dev->dev_addr, &addr, 0x06);
	else
		dev_err(&pdev->dev, "invalid mac address\n");
}

static int w90p910_ether_setup(struct net_device *dev)
{
	struct w90p910_ether *ether = netdev_priv(dev);

	ether_setup(dev);
	dev->netdev_ops = &w90p910_ether_netdev_ops;
	dev->ethtool_ops = &w90p910_ether_ethtool_ops;

	dev->tx_queue_len = 16;
	dev->dma = 0x0;
	dev->watchdog_timeo = TX_TIMEOUT;

	get_mac_address(dev);

	ether->cur_tx = 0x0;
	ether->cur_rx = 0x0;
	ether->finish_tx = 0x0;
	ether->linkflag = 0x0;
	ether->mii.phy_id = 0x01;
	ether->mii.phy_id_mask = 0x1f;
	ether->mii.reg_num_mask = 0x1f;
	ether->mii.dev = dev;
	ether->mii.mdio_read = w90p910_mdio_read;
	ether->mii.mdio_write = w90p910_mdio_write;

	setup_timer(&ether->check_timer, w90p910_check_link,
						(unsigned long)dev);

	return 0;
}

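/*
 * Probe: map the MMIO region, pick up the two platform IRQs (TX then
 * RX), grab the main and RMII clocks, initialise the net_device and
 * register it.  The error path unwinds each step in reverse order.
 */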
static int __devinit w90p910_ether_probe(struct platform_device *pdev)
{
	struct w90p910_ether *ether;
	struct net_device *dev;
	int error;

	dev = alloc_etherdev(sizeof(struct w90p910_ether));
	if (!dev)
		return -ENOMEM;

	ether = netdev_priv(dev);

	ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (ether->res == NULL) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		error = -ENXIO;
		goto failed_free;
	}

	if (!request_mem_region(ether->res->start,
				resource_size(ether->res), pdev->name)) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		error = -EBUSY;
		goto failed_free;
	}

	ether->reg = ioremap(ether->res->start, resource_size(ether->res));
	if (ether->reg == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENXIO;
		goto failed_free_mem;
	}

	ether->txirq = platform_get_irq(pdev, 0);
	if (ether->txirq < 0) {
		dev_err(&pdev->dev, "failed to get ether tx irq\n");
		error = -ENXIO;
		goto failed_free_io;
	}

	ether->rxirq = platform_get_irq(pdev, 1);
	if (ether->rxirq < 0) {
		dev_err(&pdev->dev, "failed to get ether rx irq\n");
		error = -ENXIO;
		goto failed_free_txirq;
	}

	platform_set_drvdata(pdev, dev);

	ether->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(ether->clk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->clk);
		goto failed_free_rxirq;
	}

	ether->rmiiclk = clk_get(&pdev->dev, "RMII");
	if (IS_ERR(ether->rmiiclk)) {
		dev_err(&pdev->dev, "failed to get ether clock\n");
		error = PTR_ERR(ether->rmiiclk);
		goto failed_put_clk;
	}

	ether->pdev = pdev;

	w90p910_ether_setup(dev);

	error = register_netdev(dev);
	if (error != 0) {
		dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
		error = -ENODEV;
		goto failed_put_rmiiclk;
	}

	return 0;
failed_put_rmiiclk:
	clk_put(ether->rmiiclk);
failed_put_clk:
	clk_put(ether->clk);
failed_free_rxirq:
	free_irq(ether->rxirq, pdev);
	platform_set_drvdata(pdev, NULL);
failed_free_txirq:
	free_irq(ether->txirq, pdev);
failed_free_io:
	iounmap(ether->reg);
failed_free_mem:
	release_mem_region(ether->res->start, resource_size(ether->res));
failed_free:
	free_netdev(dev);
	return error;
}

static int __devexit w90p910_ether_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct w90p910_ether *ether = netdev_priv(dev);

	unregister_netdev(dev);

	clk_put(ether->rmiiclk);
	clk_put(ether->clk);

	iounmap(ether->reg);
	release_mem_region(ether->res->start, resource_size(ether->res));

	free_irq(ether->txirq, dev);
	free_irq(ether->rxirq, dev);

	del_timer_sync(&ether->check_timer);
	platform_set_drvdata(pdev, NULL);

	free_netdev(dev);
	return 0;
}

static struct platform_driver w90p910_ether_driver = {
	.probe		= w90p910_ether_probe,
	.remove		= __devexit_p(w90p910_ether_remove),
	.driver		= {
		.name	= "nuc900-emc",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(w90p910_ether_driver);

MODULE_AUTHOR("Wan ZongShun <[email protected]>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:nuc900-emc");