Linux Kernel  3.7.1
xilinx_axienet_main.c
1 /*
2  * Xilinx Axi Ethernet device driver
3  *
4  * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5  * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <[email protected]>
6  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
7  * Copyright (c) 2010 - 2011 Michal Simek <[email protected]>
8  * Copyright (c) 2010 - 2011 PetaLogix
9  * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
10  *
11  * This is a driver for the Xilinx Axi Ethernet, which is used in the
12  * Virtex-6 and Spartan-6 device families.
13  *
14  * TODO:
15  * - Add Axi Fifo support.
16  * - Factor out Axi DMA code into separate driver.
17  * - Test and fix basic multicast filtering.
18  * - Add support for extended multicast filtering.
19  * - Test basic VLAN support.
20  * - Add support for extended VLAN support.
21  */
22 
23 #include <linux/delay.h>
24 #include <linux/etherdevice.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/netdevice.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_platform.h>
30 #include <linux/of_address.h>
31 #include <linux/skbuff.h>
32 #include <linux/spinlock.h>
33 #include <linux/phy.h>
34 #include <linux/mii.h>
35 #include <linux/ethtool.h>
36 
37 #include "xilinx_axienet.h"
38 
39 /* Descriptor defines for Tx and Rx DMA - 2^n for the best performance */
40 #define TX_BD_NUM 64
41 #define RX_BD_NUM 128
42 
43 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
44 #define DRIVER_NAME "xaxienet"
45 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
46 #define DRIVER_VERSION "1.00a"
47 
48 #define AXIENET_REGS_N 32
49 
50 /* Match table for of_platform binding */
51 static struct of_device_id axienet_of_match[] __devinitdata = {
52  { .compatible = "xlnx,axi-ethernet-1.00.a", },
53  { .compatible = "xlnx,axi-ethernet-1.01.a", },
54  { .compatible = "xlnx,axi-ethernet-2.01.a", },
55  {},
56 };
57 
58 MODULE_DEVICE_TABLE(of, axienet_of_match);
59 
60 /* Option table for setting up Axi Ethernet hardware options */
61 static struct axienet_option axienet_options[] = {
62  /* Turn on jumbo packet support for both Rx and Tx */
63  {
64  .opt = XAE_OPTION_JUMBO,
65  .reg = XAE_TC_OFFSET,
66  .m_or = XAE_TC_JUM_MASK,
67  }, {
68  .opt = XAE_OPTION_JUMBO,
69  .reg = XAE_RCW1_OFFSET,
70  .m_or = XAE_RCW1_JUM_MASK,
71  }, { /* Turn on VLAN packet support for both Rx and Tx */
72  .opt = XAE_OPTION_VLAN,
73  .reg = XAE_TC_OFFSET,
74  .m_or = XAE_TC_VLAN_MASK,
75  }, {
76  .opt = XAE_OPTION_VLAN,
77  .reg = XAE_RCW1_OFFSET,
78  .m_or = XAE_RCW1_VLAN_MASK,
79  }, { /* Turn on FCS stripping on receive packets */
80  .opt = XAE_OPTION_FCS_STRIP,
81  .reg = XAE_RCW1_OFFSET,
82  .m_or = XAE_RCW1_FCS_MASK,
83  }, { /* Turn on FCS insertion on transmit packets */
84  .opt = XAE_OPTION_FCS_INSERT,
85  .reg = XAE_TC_OFFSET,
86  .m_or = XAE_TC_FCS_MASK,
87  }, { /* Turn off length/type field checking on receive packets */
88  .opt = XAE_OPTION_LENTYPE_ERR,
89  .reg = XAE_RCW1_OFFSET,
90  .m_or = XAE_RCW1_LT_DIS_MASK,
91  }, { /* Turn on Rx flow control */
92  .opt = XAE_OPTION_FLOW_CONTROL,
93  .reg = XAE_FCC_OFFSET,
94  .m_or = XAE_FCC_FCRX_MASK,
95  }, { /* Turn on Tx flow control */
96  .opt = XAE_OPTION_FLOW_CONTROL,
97  .reg = XAE_FCC_OFFSET,
98  .m_or = XAE_FCC_FCTX_MASK,
99  }, { /* Turn on promiscuous frame filtering */
100  .opt = XAE_OPTION_PROMISC,
101  .reg = XAE_FMI_OFFSET,
102  .m_or = XAE_FMI_PM_MASK,
103  }, { /* Enable transmitter */
104  .opt = XAE_OPTION_TXEN,
105  .reg = XAE_TC_OFFSET,
106  .m_or = XAE_TC_TX_MASK,
107  }, { /* Enable receiver */
108  .opt = XAE_OPTION_RXEN,
109  .reg = XAE_RCW1_OFFSET,
110  .m_or = XAE_RCW1_RX_MASK,
111  },
112  {}
113 };
114 
124 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
125 {
126  return in_be32(lp->dma_regs + reg);
127 }
128 
138 static inline void axienet_dma_out32(struct axienet_local *lp,
139  off_t reg, u32 value)
140 {
141  out_be32((lp->dma_regs + reg), value);
142 }
143 
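These two helpers wrap the big-endian MMIO accessors for the AXI DMA register block (the MAC registers use the analogous axienet_ior()/axienet_iow() helpers from xilinx_axienet.h). A minimal usage sketch — the helper name is illustrative and not part of the driver — mirroring the read-modify-write that axienet_stop() performs further down:

static void axienet_tx_dma_pause(struct axienet_local *lp)
{
        u32 cr;

        /* Clear the run/stop bit so the Tx channel halts after the
         * current transfer, exactly as axienet_stop() does below. */
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
                          cr & ~XAXIDMA_CR_RUNSTOP_MASK);
}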
152 static void axienet_dma_bd_release(struct net_device *ndev)
153 {
154  int i;
155  struct axienet_local *lp = netdev_priv(ndev);
156 
157  for (i = 0; i < RX_BD_NUM; i++) {
158  dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
159  lp->max_frm_size, DMA_FROM_DEVICE);
160  dev_kfree_skb((struct sk_buff *)
161  (lp->rx_bd_v[i].sw_id_offset));
162  }
163 
164  if (lp->rx_bd_v) {
165  dma_free_coherent(ndev->dev.parent,
166  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
167  lp->rx_bd_v,
168  lp->rx_bd_p);
169  }
170  if (lp->tx_bd_v) {
171  dma_free_coherent(ndev->dev.parent,
172  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
173  lp->tx_bd_v,
174  lp->tx_bd_p);
175  }
176 }
177 
189 static int axienet_dma_bd_init(struct net_device *ndev)
190 {
191  u32 cr;
192  int i;
193  struct sk_buff *skb;
194  struct axienet_local *lp = netdev_priv(ndev);
195 
196  /* Reset the indexes which are used for accessing the BDs */
197  lp->tx_bd_ci = 0;
198  lp->tx_bd_tail = 0;
199  lp->rx_bd_ci = 0;
200 
201  /*
202  * Allocate the Tx and Rx buffer descriptors.
203  */
204  lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
205  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
206  &lp->tx_bd_p,
207  GFP_KERNEL);
208  if (!lp->tx_bd_v) {
209  dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
210  "descriptors");
211  goto out;
212  }
213 
214  lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
215  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
216  &lp->rx_bd_p,
217  GFP_KERNEL);
218  if (!lp->rx_bd_v) {
219  dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
220  "descriptors");
221  goto out;
222  }
223 
224  memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
225  for (i = 0; i < TX_BD_NUM; i++) {
226  lp->tx_bd_v[i].next = lp->tx_bd_p +
227  sizeof(*lp->tx_bd_v) *
228  ((i + 1) % TX_BD_NUM);
229  }
230 
231  memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
232  for (i = 0; i < RX_BD_NUM; i++) {
233  lp->rx_bd_v[i].next = lp->rx_bd_p +
234  sizeof(*lp->rx_bd_v) *
235  ((i + 1) % RX_BD_NUM);
236 
237  skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
238  if (!skb) {
239  dev_err(&ndev->dev, "alloc_skb error %d\n", i);
240  goto out;
241  }
242 
243  lp->rx_bd_v[i].sw_id_offset = (u32) skb;
244  lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
245  skb->data,
246  lp->max_frm_size,
247  DMA_FROM_DEVICE);
248  lp->rx_bd_v[i].cntrl = lp->max_frm_size;
249  }
250 
251  /* Start updating the Rx channel control register */
252  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
253  /* Update the interrupt coalesce count */
254  cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
255  ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
256  /* Update the delay timer count */
257  cr = ((cr & ~XAXIDMA_DELAY_MASK) |
258  (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
259  /* Enable coalesce, delay timer and error interrupts */
260  cr |= XAXIDMA_IRQ_ALL_MASK;
261  /* Write to the Rx channel control register */
262  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
263 
264  /* Start updating the Tx channel control register */
265  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
266  /* Update the interrupt coalesce count */
267  cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
268  ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
269  /* Update the delay timer count */
270  cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
271  (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
272  /* Enable coalesce, delay timer and error interrupts */
273  cr |= XAXIDMA_IRQ_ALL_MASK;
274  /* Write to the Tx channel control register */
275  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
276 
277  /* Populate the tail pointer and bring the Rx Axi DMA engine out of
278  * halted state. This will make the Rx side ready for reception.*/
279  axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
280  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
281  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
282  cr | XAXIDMA_CR_RUNSTOP_MASK);
283  axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
284  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
285 
286  /* Write to the RS (Run-stop) bit in the Tx channel control register.
287  * Tx channel is now ready to run. But only after we write to the
288  * tail pointer register that the Tx channel will start transmitting */
289  axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
290  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
291  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
292  cr | XAXIDMA_CR_RUNSTOP_MASK);
293 
294  return 0;
295 out:
296  axienet_dma_bd_release(ndev);
297  return -ENOMEM;
298 }
299 
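The two descriptor rings built above are circular: each descriptor's next field holds the bus address of the descriptor that follows it, and the last entry points back at the first. A small sketch of that address arithmetic (the helper names are illustrative, not part of the driver):

/* Bus address of ring entry i, and of the entry the hardware fetches next.
 * With TX_BD_NUM = 64, entry 63's next field points back at entry 0. */
static dma_addr_t bd_phys(dma_addr_t ring_base, size_t bd_size, int i)
{
        return ring_base + bd_size * i;
}

static dma_addr_t bd_next_phys(dma_addr_t ring_base, size_t bd_size,
                               int i, int num_bds)
{
        return ring_base + bd_size * ((i + 1) % num_bds);
}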
308 static void axienet_set_mac_address(struct net_device *ndev, void *address)
309 {
310  struct axienet_local *lp = netdev_priv(ndev);
311 
312  if (address)
313  memcpy(ndev->dev_addr, address, ETH_ALEN);
314  if (!is_valid_ether_addr(ndev->dev_addr))
315  eth_random_addr(ndev->dev_addr);
316 
317  /* Set up the unicast MAC address filter with the MAC address set above */
318  axienet_iow(lp, XAE_UAW0_OFFSET,
319  (ndev->dev_addr[0]) |
320  (ndev->dev_addr[1] << 8) |
321  (ndev->dev_addr[2] << 16) |
322  (ndev->dev_addr[3] << 24));
323  axienet_iow(lp, XAE_UAW1_OFFSET,
324  (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
325  ~XAE_UAW1_UNICASTADDR_MASK) |
326  (ndev->dev_addr[4] |
327  (ndev->dev_addr[5] << 8))));
328 }
329 
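The unicast address is split across two registers: UAW0 carries octets 0-3 with octet 0 in the least significant byte, and the low 16 bits of UAW1 carry octets 4 and 5. A sketch of that packing (illustrative helper, not part of the driver):

static void mac_to_uaw(const unsigned char *mac, u32 *uaw0, u32 *uaw1_low)
{
        /* Same byte order as axienet_set_mac_address() above. */
        *uaw0 = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
        *uaw1_low = mac[4] | (mac[5] << 8);
}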
341 static int netdev_set_mac_address(struct net_device *ndev, void *p)
342 {
343  struct sockaddr *addr = p;
344  axienet_set_mac_address(ndev, addr->sa_data);
345  return 0;
346 }
347 
359 static void axienet_set_multicast_list(struct net_device *ndev)
360 {
361  int i;
362  u32 reg, af0reg, af1reg;
363  struct axienet_local *lp = netdev_priv(ndev);
364 
365  if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
366  netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
367  /* We must make the kernel realize we had to move into
368  * promiscuous mode. If it was a promiscuous mode request
369  * the flag is already set. If not we set it. */
370  ndev->flags |= IFF_PROMISC;
371  reg = axienet_ior(lp, XAE_FMI_OFFSET);
372  reg |= XAE_FMI_PM_MASK;
373  axienet_iow(lp, XAE_FMI_OFFSET, reg);
374  dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
375  } else if (!netdev_mc_empty(ndev)) {
376  struct netdev_hw_addr *ha;
377 
378  i = 0;
379  netdev_for_each_mc_addr(ha, ndev) {
380  if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
381  break;
382 
383  af0reg = (ha->addr[0]);
384  af0reg |= (ha->addr[1] << 8);
385  af0reg |= (ha->addr[2] << 16);
386  af0reg |= (ha->addr[3] << 24);
387 
388  af1reg = (ha->addr[4]);
389  af1reg |= (ha->addr[5] << 8);
390 
391  reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
392  reg |= i;
393 
394  axienet_iow(lp, XAE_FMI_OFFSET, reg);
395  axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
396  axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
397  i++;
398  }
399  } else {
400  reg = axienet_ior(lp, XAE_FMI_OFFSET);
401  reg &= ~XAE_FMI_PM_MASK;
402 
403  axienet_iow(lp, XAE_FMI_OFFSET, reg);
404 
405  for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
406  reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
407  reg |= i;
408 
409  axienet_iow(lp, XAE_FMI_OFFSET, reg);
410  axienet_iow(lp, XAE_AF0_OFFSET, 0);
411  axienet_iow(lp, XAE_AF1_OFFSET, 0);
412  }
413 
414  dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
415  }
416 }
417 
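Multicast filtering uses a small CAM: the low byte written to XAE_FMI_OFFSET selects which of the XAE_MULTICAST_CAM_TABLE_NUM entries the subsequent AF0/AF1 writes land in, and AF0/AF1 are packed the same way as UAW0/UAW1. A sketch of programming one entry, mirroring the loops above (the helper is illustrative only):

static void axienet_write_cam_entry(struct axienet_local *lp, int index,
                                    u32 af0, u32 af1)
{
        u32 fmi = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;

        /* Select CAM entry 'index', then load the packed address words;
         * writing zeros clears the entry, as the disable path above does. */
        axienet_iow(lp, XAE_FMI_OFFSET, fmi | index);
        axienet_iow(lp, XAE_AF0_OFFSET, af0);
        axienet_iow(lp, XAE_AF1_OFFSET, af1);
}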
429 static void axienet_setoptions(struct net_device *ndev, u32 options)
430 {
431  int reg;
432  struct axienet_local *lp = netdev_priv(ndev);
433  struct axienet_option *tp = &axienet_options[0];
434 
435  while (tp->opt) {
436  reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
437  if (options & tp->opt)
438  reg |= tp->m_or;
439  axienet_iow(lp, tp->reg, reg);
440  tp++;
441  }
442 
443  lp->options |= options;
444 }
445 
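axienet_setoptions() is table-driven: every entry is a read-modify-write of one bit field, and an option such as XAE_OPTION_JUMBO appears twice in axienet_options[] because it has to be set in both the transmit (TC) and receive (RCW1) configuration words. A short usage sketch:

/* Enable jumbo support at run time; the table walk sets XAE_TC_JUM_MASK
 * in XAE_TC_OFFSET and XAE_RCW1_JUM_MASK in XAE_RCW1_OFFSET. */
lp->options |= XAE_OPTION_JUMBO;
axienet_setoptions(ndev, lp->options);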
446 static void __axienet_device_reset(struct axienet_local *lp,
447  struct device *dev, off_t offset)
448 {
449  u32 timeout;
450  /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
451  * process of Axi DMA takes a while to complete as all pending
452  * commands/transfers will be flushed or completed during this
453  * reset process. */
454  axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
455  timeout = DELAY_OF_ONE_MILLISEC;
456  while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
457  udelay(1);
458  if (--timeout == 0) {
459  dev_err(dev, "axienet_device_reset DMA "
460  "reset timeout!\n");
461  break;
462  }
463  }
464 }
465 
477 static void axienet_device_reset(struct net_device *ndev)
478 {
479  u32 axienet_status;
480  struct axienet_local *lp = netdev_priv(ndev);
481 
482  __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
483  __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
484 
486  lp->options &= (~XAE_OPTION_JUMBO);
487 
488  if ((ndev->mtu > XAE_MTU) &&
489  (ndev->mtu <= XAE_JUMBO_MTU) &&
490  (lp->jumbo_support)) {
491  lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
492  XAE_TRL_SIZE;
493  lp->options |= XAE_OPTION_JUMBO;
494  }
495 
496  if (axienet_dma_bd_init(ndev)) {
497  dev_err(&ndev->dev, "axienet_device_reset descriptor "
498  "allocation failed\n");
499  }
500 
501  axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
502  axienet_status &= ~XAE_RCW1_RX_MASK;
503  axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
504 
505  axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
506  if (axienet_status & XAE_INT_RXRJECT_MASK)
507  axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
508 
509  axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
510 
511  /* Sync default options with HW but leave receiver and
512  * transmitter disabled.*/
513  axienet_setoptions(ndev, lp->options &
514  ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
515  axienet_set_mac_address(ndev, NULL);
516  axienet_set_multicast_list(ndev);
517  axienet_setoptions(ndev, lp->options);
518 
519  ndev->trans_start = jiffies;
520 }
521 
530 static void axienet_adjust_link(struct net_device *ndev)
531 {
532  u32 emmc_reg;
533  u32 link_state;
534  u32 setspeed = 1;
535  struct axienet_local *lp = netdev_priv(ndev);
536  struct phy_device *phy = lp->phy_dev;
537 
538  link_state = phy->speed | (phy->duplex << 1) | phy->link;
539  if (lp->last_link != link_state) {
540  if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
541  if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
542  setspeed = 0;
543  } else {
544  if ((phy->speed == SPEED_1000) &&
545  (lp->phy_type == XAE_PHY_TYPE_MII))
546  setspeed = 0;
547  }
548 
549  if (setspeed == 1) {
550  emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
551  emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
552 
553  switch (phy->speed) {
554  case SPEED_1000:
555  emmc_reg |= XAE_EMMC_LINKSPD_1000;
556  break;
557  case SPEED_100:
558  emmc_reg |= XAE_EMMC_LINKSPD_100;
559  break;
560  case SPEED_10:
561  emmc_reg |= XAE_EMMC_LINKSPD_10;
562  break;
563  default:
564  dev_err(&ndev->dev, "Speed other than 10, 100 "
565  "or 1Gbps is not supported\n");
566  break;
567  }
568 
569  axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
570  lp->last_link = link_state;
571  phy_print_status(phy);
572  } else {
573  dev_err(&ndev->dev, "Error setting Axi Ethernet "
574  "mac speed\n");
575  }
576  }
577 }
578 
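link_state folds speed, duplex and link status into a single word so that a change in any of the three is caught by one comparison against lp->last_link. A sketch of the encoding with a worked value (illustrative helper only):

/* Same encoding as above: for a 1000 Mb/s, full-duplex link that is up,
 * link_state = 1000 | (1 << 1) | 1 = 1003. */
static u32 encode_link_state(int speed, int duplex, int link)
{
        return speed | (duplex << 1) | link;
}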
590 static void axienet_start_xmit_done(struct net_device *ndev)
591 {
592  u32 size = 0;
593  u32 packets = 0;
594  struct axienet_local *lp = netdev_priv(ndev);
595  struct axidma_bd *cur_p;
596  unsigned int status = 0;
597 
598  cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
599  status = cur_p->status;
600  while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
601  dma_unmap_single(ndev->dev.parent, cur_p->phys,
602  (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
603  DMA_TO_DEVICE);
604  if (cur_p->app4)
605  dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
606  /*cur_p->phys = 0;*/
607  cur_p->app0 = 0;
608  cur_p->app1 = 0;
609  cur_p->app2 = 0;
610  cur_p->app4 = 0;
611  cur_p->status = 0;
612 
613  size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
614  packets++;
615 
616  lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
617  cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
618  status = cur_p->status;
619  }
620 
621  ndev->stats.tx_packets += packets;
622  ndev->stats.tx_bytes += size;
623  netif_wake_queue(ndev);
624 }
625 
639 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
640  int num_frag)
641 {
642  struct axidma_bd *cur_p;
643  cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
644  if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
645  return NETDEV_TX_BUSY;
646  return 0;
647 }
648 
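A frame consumes one descriptor for the linear part of the skb plus one per page fragment, so the check probes the slot num_frag entries ahead of tx_bd_tail; if the hardware still owns that slot, the ring is full and the caller reports NETDEV_TX_BUSY. A sketch of the wrap-around index arithmetic (illustrative helper):

static int ring_slot_ahead(int tail, int num_frag, int ring_size)
{
        return (tail + num_frag) % ring_size;
}

/* e.g. ring_slot_ahead(62, 3, TX_BD_NUM) == 1 with TX_BD_NUM = 64:
 * the probe wraps past the end of the ring. */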
662 static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
663 {
664  u32 ii;
665  u32 num_frag;
666  u32 csum_start_off;
667  u32 csum_index_off;
668  skb_frag_t *frag;
669  dma_addr_t tail_p;
670  struct axienet_local *lp = netdev_priv(ndev);
671  struct axidma_bd *cur_p;
672 
673  num_frag = skb_shinfo(skb)->nr_frags;
674  cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
675 
676  if (axienet_check_tx_bd_space(lp, num_frag)) {
677  if (!netif_queue_stopped(ndev))
678  netif_stop_queue(ndev);
679  return NETDEV_TX_BUSY;
680  }
681 
682  if (skb->ip_summed == CHECKSUM_PARTIAL) {
683  if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
684  /* Tx Full Checksum Offload Enabled */
685  cur_p->app0 |= 2;
686  } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
687  csum_start_off = skb_transport_offset(skb);
688  csum_index_off = csum_start_off + skb->csum_offset;
689  /* Tx Partial Checksum Offload Enabled */
690  cur_p->app0 |= 1;
691  cur_p->app1 = (csum_start_off << 16) | csum_index_off;
692  }
693  } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
694  cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
695  }
696 
697  cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
698  cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
699  skb_headlen(skb), DMA_TO_DEVICE);
700 
701  for (ii = 0; ii < num_frag; ii++) {
702  lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
703  cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
704  frag = &skb_shinfo(skb)->frags[ii];
705  cur_p->phys = dma_map_single(ndev->dev.parent,
706  skb_frag_address(frag),
707  skb_frag_size(frag),
708  DMA_TO_DEVICE);
709  cur_p->cntrl = skb_frag_size(frag);
710  }
711 
712  cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
713  cur_p->app4 = (unsigned long)skb;
714 
715  tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
716  /* Start the transfer */
717  axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
718  lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
719 
720  return NETDEV_TX_OK;
721 }
722 
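The checksum-offload hints travel in the descriptor application words: app0 value 2 requests full offload, app0 value 1 requests partial offload, and in the partial case app1 carries the checksum start offset in its upper 16 bits and the insert offset in its lower 16 bits. A sketch with a worked value (the offsets are for a plain TCP/IPv4 frame and are given for illustration only):

static u32 pack_partial_csum(u32 csum_start_off, u32 csum_index_off)
{
        /* Same packing as app1 above. For a 14-byte Ethernet header and a
         * 20-byte IPv4 header, csum_start_off = 34; the TCP checksum field
         * sits 16 bytes into the TCP header, so the result is
         * (34 << 16) | 50 = 0x00220032. */
        return (csum_start_off << 16) | csum_index_off;
}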
732 static void axienet_recv(struct net_device *ndev)
733 {
734  u32 length;
735  u32 csumstatus;
736  u32 size = 0;
737  u32 packets = 0;
738  dma_addr_t tail_p;
739  struct axienet_local *lp = netdev_priv(ndev);
740  struct sk_buff *skb, *new_skb;
741  struct axidma_bd *cur_p;
742 
743  tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
744  cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
745 
746  while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
747  skb = (struct sk_buff *) (cur_p->sw_id_offset);
748  length = cur_p->app4 & 0x0000FFFF;
749 
750  dma_unmap_single(ndev->dev.parent, cur_p->phys,
751  lp->max_frm_size,
752  DMA_FROM_DEVICE);
753 
754  skb_put(skb, length);
755  skb->protocol = eth_type_trans(skb, ndev);
756  /*skb_checksum_none_assert(skb);*/
757  skb->ip_summed = CHECKSUM_NONE;
758 
759  /* if we're doing Rx csum offload, set it up */
760  if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
761  csumstatus = (cur_p->app2 &
762  XAE_FULL_CSUM_STATUS_MASK) >> 3;
763  if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
764  (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
765  skb->ip_summed = CHECKSUM_UNNECESSARY;
766  }
767  } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
768  skb->protocol == __constant_htons(ETH_P_IP) &&
769  skb->len > 64) {
770  skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
771  skb->ip_summed = CHECKSUM_COMPLETE;
772  }
773 
774  netif_rx(skb);
775 
776  size += length;
777  packets++;
778 
779  new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
780  if (!new_skb) {
781  dev_err(&ndev->dev, "no memory for new sk_buff\n");
782  return;
783  }
784  cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
785  lp->max_frm_size,
786  DMA_FROM_DEVICE);
787  cur_p->cntrl = lp->max_frm_size;
788  cur_p->status = 0;
789  cur_p->sw_id_offset = (u32) new_skb;
790 
791  lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
792  cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
793  }
794 
795  ndev->stats.rx_packets += packets;
796  ndev->stats.rx_bytes += size;
797 
798  axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
799 }
800 
811 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
812 {
813  u32 cr;
814  unsigned int status;
815  struct net_device *ndev = _ndev;
816  struct axienet_local *lp = netdev_priv(ndev);
817 
818  status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
819  if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
820  axienet_start_xmit_done(lp->ndev);
821  goto out;
822  }
823  if (!(status & XAXIDMA_IRQ_ALL_MASK))
824  dev_err(&ndev->dev, "No interrupts asserted in Tx path");
825  if (status & XAXIDMA_IRQ_ERROR_MASK) {
826  dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
827  dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
828  (lp->tx_bd_v[lp->tx_bd_ci]).phys);
829 
830  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
831  /* Disable coalesce, delay timer and error interrupts */
832  cr &= (~XAXIDMA_IRQ_ALL_MASK);
833  /* Write to the Tx channel control register */
834  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
835 
836  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
837  /* Disable coalesce, delay timer and error interrupts */
838  cr &= (~XAXIDMA_IRQ_ALL_MASK);
839  /* Write to the Rx channel control register */
840  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
841 
842  tasklet_schedule(&lp->dma_err_tasklet);
843  }
844 out:
845  axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
846  return IRQ_HANDLED;
847 }
848 
859 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
860 {
861  u32 cr;
862  unsigned int status;
863  struct net_device *ndev = _ndev;
864  struct axienet_local *lp = netdev_priv(ndev);
865 
866  status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
867  if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
868  axienet_recv(lp->ndev);
869  goto out;
870  }
871  if (!(status & XAXIDMA_IRQ_ALL_MASK))
872  dev_err(&ndev->dev, "No interrupts asserted in Rx path");
873  if (status & XAXIDMA_IRQ_ERROR_MASK) {
874  dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
875  dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
876  (lp->rx_bd_v[lp->rx_bd_ci]).phys);
877 
878  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
879  /* Disable coalesce, delay timer and error interrupts */
880  cr &= (~XAXIDMA_IRQ_ALL_MASK);
881  /* Finally write to the Tx channel control register */
882  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
883 
884  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
885  /* Disable coalesce, delay timer and error interrupts */
886  cr &= (~XAXIDMA_IRQ_ALL_MASK);
887  /* write to the Rx channel control register */
888  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
889 
890  tasklet_schedule(&lp->dma_err_tasklet);
891  }
892 out:
893  axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
894  return IRQ_HANDLED;
895 }
896 
897 static void axienet_dma_err_handler(unsigned long data);
898 
912 static int axienet_open(struct net_device *ndev)
913 {
914  int ret, mdio_mcreg;
915  struct axienet_local *lp = netdev_priv(ndev);
916 
917  dev_dbg(&ndev->dev, "axienet_open()\n");
918 
919  mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
920  ret = axienet_mdio_wait_until_ready(lp);
921  if (ret < 0)
922  return ret;
923  /* Disable the MDIO interface till Axi Ethernet Reset is completed.
924  * When we do an Axi Ethernet reset, it resets the complete core
925  * including the MDIO. If MDIO is not disabled when the reset
926  * process is started, MDIO will be broken afterwards. */
927  axienet_iow(lp, XAE_MDIO_MC_OFFSET,
928  (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
929  axienet_device_reset(ndev);
930  /* Enable the MDIO */
931  axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
932  ret = axienet_mdio_wait_until_ready(lp);
933  if (ret < 0)
934  return ret;
935 
936  if (lp->phy_node) {
937  lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
938  axienet_adjust_link, 0,
939  lp->phy_type);
940  if (!lp->phy_dev) {
941  dev_err(lp->dev, "of_phy_connect() failed\n");
942  return -ENODEV;
943  }
944  phy_start(lp->phy_dev);
945  }
946 
947  /* Enable tasklets for Axi DMA error handling */
948  tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
949  (unsigned long) lp);
950 
951  /* Enable interrupts for Axi DMA Tx */
952  ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
953  if (ret)
954  goto err_tx_irq;
955  /* Enable interrupts for Axi DMA Rx */
956  ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
957  if (ret)
958  goto err_rx_irq;
959 
960  return 0;
961 
962 err_rx_irq:
963  free_irq(lp->tx_irq, ndev);
964 err_tx_irq:
965  if (lp->phy_dev)
966  phy_disconnect(lp->phy_dev);
967  lp->phy_dev = NULL;
968  tasklet_kill(&lp->dma_err_tasklet);
969  dev_err(lp->dev, "request_irq() failed\n");
970  return ret;
971 }
972 
983 static int axienet_stop(struct net_device *ndev)
984 {
985  u32 cr;
986  struct axienet_local *lp = netdev_priv(ndev);
987 
988  dev_dbg(&ndev->dev, "axienet_close()\n");
989 
990  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
991  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
992  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
993  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
994  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
995  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
996  axienet_setoptions(ndev, lp->options &
997  ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
998 
999  tasklet_kill(&lp->dma_err_tasklet);
1000 
1001  free_irq(lp->tx_irq, ndev);
1002  free_irq(lp->rx_irq, ndev);
1003 
1004  if (lp->phy_dev)
1005  phy_disconnect(lp->phy_dev);
1006  lp->phy_dev = NULL;
1007 
1008  axienet_dma_bd_release(ndev);
1009  return 0;
1010 }
1011 
1023 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1024 {
1025  struct axienet_local *lp = netdev_priv(ndev);
1026 
1027  if (netif_running(ndev))
1028  return -EBUSY;
1029  if (lp->jumbo_support) {
1030  if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
1031  return -EINVAL;
1032  ndev->mtu = new_mtu;
1033  } else {
1034  if ((new_mtu > XAE_MTU) || (new_mtu < 64))
1035  return -EINVAL;
1036  ndev->mtu = new_mtu;
1037  }
1038 
1039  return 0;
1040 }
1041 
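The MTU bounds used here pair with the frame-size math in axienet_device_reset(): the receive buffers must hold the MTU plus the (possibly VLAN-tagged) Ethernet header and the FCS trailer. A sketch of that arithmetic, assuming XAE_HDR_VLAN_SIZE is the usual 18-byte tagged header and XAE_TRL_SIZE the 4-byte trailer (the authoritative values live in xilinx_axienet.h):

static int axienet_max_frame_size(int mtu)
{
        /* e.g. an MTU of 9000 gives 9000 + 18 + 4 = 9022 bytes on the wire
         * under the header/trailer sizes assumed above. */
        return mtu + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE;
}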
1042 #ifdef CONFIG_NET_POLL_CONTROLLER
1043 
1050 static void axienet_poll_controller(struct net_device *ndev)
1051 {
1052  struct axienet_local *lp = netdev_priv(ndev);
1053  disable_irq(lp->tx_irq);
1054  disable_irq(lp->rx_irq);
1055  axienet_rx_irq(lp->tx_irq, ndev);
1056  axienet_tx_irq(lp->rx_irq, ndev);
1057  enable_irq(lp->tx_irq);
1058  enable_irq(lp->rx_irq);
1059 }
1060 #endif
1061 
1062 static const struct net_device_ops axienet_netdev_ops = {
1063  .ndo_open = axienet_open,
1064  .ndo_stop = axienet_stop,
1065  .ndo_start_xmit = axienet_start_xmit,
1066  .ndo_change_mtu = axienet_change_mtu,
1067  .ndo_set_mac_address = netdev_set_mac_address,
1068  .ndo_validate_addr = eth_validate_addr,
1069  .ndo_set_rx_mode = axienet_set_multicast_list,
1070 #ifdef CONFIG_NET_POLL_CONTROLLER
1071  .ndo_poll_controller = axienet_poll_controller,
1072 #endif
1073 };
1074 
1085 static int axienet_ethtools_get_settings(struct net_device *ndev,
1086  struct ethtool_cmd *ecmd)
1087 {
1088  struct axienet_local *lp = netdev_priv(ndev);
1089  struct phy_device *phydev = lp->phy_dev;
1090  if (!phydev)
1091  return -ENODEV;
1092  return phy_ethtool_gset(phydev, ecmd);
1093 }
1094 
1106 static int axienet_ethtools_set_settings(struct net_device *ndev,
1107  struct ethtool_cmd *ecmd)
1108 {
1109  struct axienet_local *lp = netdev_priv(ndev);
1110  struct phy_device *phydev = lp->phy_dev;
1111  if (!phydev)
1112  return -ENODEV;
1113  return phy_ethtool_sset(phydev, ecmd);
1114 }
1115 
1124 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1125  struct ethtool_drvinfo *ed)
1126 {
1127  memset(ed, 0, sizeof(struct ethtool_drvinfo));
1128  strcpy(ed->driver, DRIVER_NAME);
1129  strcpy(ed->version, DRIVER_VERSION);
1130  ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
1131 }
1132 
1141 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1142 {
1143  return sizeof(u32) * AXIENET_REGS_N;
1144 }
1145 
1156 static void axienet_ethtools_get_regs(struct net_device *ndev,
1157  struct ethtool_regs *regs, void *ret)
1158 {
1159  u32 *data = (u32 *) ret;
1160  size_t len = sizeof(u32) * AXIENET_REGS_N;
1161  struct axienet_local *lp = netdev_priv(ndev);
1162 
1163  regs->version = 0;
1164  regs->len = len;
1165 
1166  memset(data, 0, len);
1167  data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1168  data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1169  data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1170  data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1171  data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1172  data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1173  data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1174  data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1175  data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1176  data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1177  data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1178  data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1179  data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1180  data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1181  data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1182  data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1183  data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1184  data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1185  data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1186  data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1187  data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1188  data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1189  data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1190  data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
1191  data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
1192  data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
1193  data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
1194  data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1195  data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1196  data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1197  data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1198  data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1199 }
1200 
1210 static void
1211 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1212  struct ethtool_pauseparam *epauseparm)
1213 {
1214  u32 regval;
1215  struct axienet_local *lp = netdev_priv(ndev);
1216  epauseparm->autoneg = 0;
1217  regval = axienet_ior(lp, XAE_FCC_OFFSET);
1218  epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
1219  epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
1220 }
1221 
1232 static int
1233 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1234  struct ethtool_pauseparam *epauseparm)
1235 {
1236  u32 regval = 0;
1237  struct axienet_local *lp = netdev_priv(ndev);
1238 
1239  if (netif_running(ndev)) {
1240  printk(KERN_ERR "%s: Please stop netif before applying "
1241  "configuration\n", ndev->name);
1242  return -EFAULT;
1243  }
1244 
1245  regval = axienet_ior(lp, XAE_FCC_OFFSET);
1246  if (epauseparm->tx_pause)
1247  regval |= XAE_FCC_FCTX_MASK;
1248  else
1249  regval &= ~XAE_FCC_FCTX_MASK;
1250  if (epauseparm->rx_pause)
1251  regval |= XAE_FCC_FCRX_MASK;
1252  else
1253  regval &= ~XAE_FCC_FCRX_MASK;
1254  axienet_iow(lp, XAE_FCC_OFFSET, regval);
1255 
1256  return 0;
1257 }
1258 
1268 static int axienet_ethtools_get_coalesce(struct net_device *ndev,
1269  struct ethtool_coalesce *ecoalesce)
1270 {
1271  u32 regval = 0;
1272  struct axienet_local *lp = netdev_priv(ndev);
1273  regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1274  ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1275  >> XAXIDMA_COALESCE_SHIFT;
1276  regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1277  ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
1278  >> XAXIDMA_COALESCE_SHIFT;
1279  return 0;
1280 }
1281 
1291 static int axienet_ethtools_set_coalesce(struct net_device *ndev,
1292  struct ethtool_coalesce *ecoalesce)
1293 {
1294  struct axienet_local *lp = netdev_priv(ndev);
1295 
1296  if (netif_running(ndev)) {
1297  printk(KERN_ERR "%s: Please stop netif before applying "
1298  "configuration\n", ndev->name);
1299  return -EFAULT;
1300  }
1301 
1302  if ((ecoalesce->rx_coalesce_usecs) ||
1303  (ecoalesce->rx_coalesce_usecs_irq) ||
1304  (ecoalesce->rx_max_coalesced_frames_irq) ||
1305  (ecoalesce->tx_coalesce_usecs) ||
1306  (ecoalesce->tx_coalesce_usecs_irq) ||
1307  (ecoalesce->tx_max_coalesced_frames_irq) ||
1308  (ecoalesce->stats_block_coalesce_usecs) ||
1309  (ecoalesce->use_adaptive_rx_coalesce) ||
1310  (ecoalesce->use_adaptive_tx_coalesce) ||
1311  (ecoalesce->pkt_rate_low) ||
1312  (ecoalesce->rx_coalesce_usecs_low) ||
1313  (ecoalesce->rx_max_coalesced_frames_low) ||
1314  (ecoalesce->tx_coalesce_usecs_low) ||
1315  (ecoalesce->tx_max_coalesced_frames_low) ||
1316  (ecoalesce->pkt_rate_high) ||
1317  (ecoalesce->rx_coalesce_usecs_high) ||
1318  (ecoalesce->rx_max_coalesced_frames_high) ||
1319  (ecoalesce->tx_coalesce_usecs_high) ||
1320  (ecoalesce->tx_max_coalesced_frames_high) ||
1321  (ecoalesce->rate_sample_interval))
1322  return -EOPNOTSUPP;
1323  if (ecoalesce->rx_max_coalesced_frames)
1324  lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
1325  if (ecoalesce->tx_max_coalesced_frames)
1326  lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
1327 
1328  return 0;
1329 }
1330 
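The counts stored here take effect the next time the DMA control registers are programmed (axienet_dma_bd_init() on the following open, or the error handler): the value is merged into the control word with a mask and shift, and get_coalesce() reverses the operation. A sketch of that round trip (helper names are illustrative):

static u32 cr_set_coalesce(u32 cr, u32 count)
{
        return (cr & ~XAXIDMA_COALESCE_MASK) |
               (count << XAXIDMA_COALESCE_SHIFT);
}

static u32 cr_get_coalesce(u32 cr)
{
        return (cr & XAXIDMA_COALESCE_MASK) >> XAXIDMA_COALESCE_SHIFT;
}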
1331 static struct ethtool_ops axienet_ethtool_ops = {
1332  .get_settings = axienet_ethtools_get_settings,
1333  .set_settings = axienet_ethtools_set_settings,
1334  .get_drvinfo = axienet_ethtools_get_drvinfo,
1335  .get_regs_len = axienet_ethtools_get_regs_len,
1336  .get_regs = axienet_ethtools_get_regs,
1337  .get_link = ethtool_op_get_link,
1338  .get_pauseparam = axienet_ethtools_get_pauseparam,
1339  .set_pauseparam = axienet_ethtools_set_pauseparam,
1340  .get_coalesce = axienet_ethtools_get_coalesce,
1341  .set_coalesce = axienet_ethtools_set_coalesce,
1342 };
1343 
1351 static void axienet_dma_err_handler(unsigned long data)
1352 {
1353  u32 axienet_status;
1354  u32 cr, i;
1355  int mdio_mcreg;
1356  struct axienet_local *lp = (struct axienet_local *) data;
1357  struct net_device *ndev = lp->ndev;
1358  struct axidma_bd *cur_p;
1359 
1360  axienet_setoptions(ndev, lp->options &
1361  ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1362  mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1363  axienet_mdio_wait_until_ready(lp);
1364  /* Disable the MDIO interface till Axi Ethernet Reset is completed.
1365  * When we do an Axi Ethernet reset, it resets the complete core
1366  * including the MDIO. So if MDIO is not disabled when the reset
1367  * process is started, MDIO will be broken afterwards. */
1368  axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1369  ~XAE_MDIO_MC_MDIOEN_MASK));
1370 
1371  __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1372  __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1373 
1374  axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1375  axienet_mdio_wait_until_ready(lp);
1376 
1377  for (i = 0; i < TX_BD_NUM; i++) {
1378  cur_p = &lp->tx_bd_v[i];
1379  if (cur_p->phys)
1380  dma_unmap_single(ndev->dev.parent, cur_p->phys,
1381  (cur_p->cntrl &
1382  XAXIDMA_BD_CTRL_LENGTH_MASK),
1383  DMA_TO_DEVICE);
1384  if (cur_p->app4)
1385  dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1386  cur_p->phys = 0;
1387  cur_p->cntrl = 0;
1388  cur_p->status = 0;
1389  cur_p->app0 = 0;
1390  cur_p->app1 = 0;
1391  cur_p->app2 = 0;
1392  cur_p->app3 = 0;
1393  cur_p->app4 = 0;
1394  cur_p->sw_id_offset = 0;
1395  }
1396 
1397  for (i = 0; i < RX_BD_NUM; i++) {
1398  cur_p = &lp->rx_bd_v[i];
1399  cur_p->status = 0;
1400  cur_p->app0 = 0;
1401  cur_p->app1 = 0;
1402  cur_p->app2 = 0;
1403  cur_p->app3 = 0;
1404  cur_p->app4 = 0;
1405  }
1406 
1407  lp->tx_bd_ci = 0;
1408  lp->tx_bd_tail = 0;
1409  lp->rx_bd_ci = 0;
1410 
1411  /* Start updating the Rx channel control register */
1412  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1413  /* Update the interrupt coalesce count */
1414  cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1415  (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1416  /* Update the delay timer count */
1417  cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1418  (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1419  /* Enable coalesce, delay timer and error interrupts */
1420  cr |= XAXIDMA_IRQ_ALL_MASK;
1421  /* Finally write to the Rx channel control register */
1422  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1423 
1424  /* Start updating the Tx channel control register */
1425  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1426  /* Update the interrupt coalesce count */
1427  cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1428  (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1429  /* Update the delay timer count */
1430  cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1431  (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1432  /* Enable coalesce, delay timer and error interrupts */
1433  cr |= XAXIDMA_IRQ_ALL_MASK;
1434  /* Finally write to the Tx channel control register */
1435  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1436 
1437  /* Populate the tail pointer and bring the Rx Axi DMA engine out of
1438  * halted state. This will make the Rx side ready for reception.*/
1439  axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1440  cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1441  axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1442  cr | XAXIDMA_CR_RUNSTOP_MASK);
1443  axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1444  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1445 
1446  /* Write to the RS (Run-stop) bit in the Tx channel control register.
1447  * Tx channel is now ready to run. But only after we write to the
1448  * tail pointer register that the Tx channel will start transmitting */
1449  axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1450  cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1451  axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1452  cr | XAXIDMA_CR_RUNSTOP_MASK);
1453 
1454  axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1455  axienet_status &= ~XAE_RCW1_RX_MASK;
1456  axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1457 
1458  axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1459  if (axienet_status & XAE_INT_RXRJECT_MASK)
1460  axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1461  axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1462 
1463  /* Sync default options with HW but leave receiver and
1464  * transmitter disabled.*/
1465  axienet_setoptions(ndev, lp->options &
1466  ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1467  axienet_set_mac_address(ndev, NULL);
1468  axienet_set_multicast_list(ndev);
1469  axienet_setoptions(ndev, lp->options);
1470 }
1471 
1485 static int __devinit axienet_of_probe(struct platform_device *op)
1486 {
1487  __be32 *p;
1488  int size, ret = 0;
1489  struct device_node *np;
1490  struct axienet_local *lp;
1491  struct net_device *ndev;
1492  const void *addr;
1493 
1494  ndev = alloc_etherdev(sizeof(*lp));
1495  if (!ndev)
1496  return -ENOMEM;
1497 
1498  ether_setup(ndev);
1499  dev_set_drvdata(&op->dev, ndev);
1500 
1501  SET_NETDEV_DEV(ndev, &op->dev);
1502  ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1503  ndev->features = NETIF_F_SG;
1504  ndev->netdev_ops = &axienet_netdev_ops;
1505  ndev->ethtool_ops = &axienet_ethtool_ops;
1506 
1507  lp = netdev_priv(ndev);
1508  lp->ndev = ndev;
1509  lp->dev = &op->dev;
1510  lp->options = XAE_OPTION_DEFAULTS;
1511  /* Map device registers */
1512  lp->regs = of_iomap(op->dev.of_node, 0);
1513  if (!lp->regs) {
1514  dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1515  goto nodev;
1516  }
1517  /* Setup checksum offload, but default to off if not specified */
1518  lp->features = 0;
1519 
1520  p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1521  if (p) {
1522  switch (be32_to_cpup(p)) {
1523  case 1:
1524  lp->csum_offload_on_tx_path =
1525  XAE_FEATURE_PARTIAL_TX_CSUM;
1526  lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1527  /* Can checksum TCP/UDP over IPv4. */
1528  ndev->features |= NETIF_F_IP_CSUM;
1529  break;
1530  case 2:
1531  lp->csum_offload_on_tx_path =
1532  XAE_FEATURE_FULL_TX_CSUM;
1533  lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1534  /* Can checksum TCP/UDP over IPv4. */
1535  ndev->features |= NETIF_F_IP_CSUM;
1536  break;
1537  default:
1538  lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1539  }
1540  }
1541  p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1542  if (p) {
1543  switch (be32_to_cpup(p)) {
1544  case 1:
1545  lp->csum_offload_on_rx_path =
1546  XAE_FEATURE_PARTIAL_RX_CSUM;
1547  lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1548  break;
1549  case 2:
1550  lp->csum_offload_on_rx_path =
1551  XAE_FEATURE_FULL_RX_CSUM;
1552  lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1553  break;
1554  default:
1555  lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1556  }
1557  }
1558  /* For supporting jumbo frames, the Axi Ethernet hardware must have
1559  * a larger Rx/Tx Memory. Typically, the size must be more than or
1560  * equal to 16384 bytes, so that we can enable jumbo option and start
1561  * supporting jumbo frames. Here we check for memory allocated for
1562  * Rx/Tx in the hardware from the device-tree and accordingly set
1563  * flags. */
1564  p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1565  if (p) {
1566  if ((be32_to_cpup(p)) >= 0x4000)
1567  lp->jumbo_support = 1;
1568  }
1569  p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1570  NULL);
1571  if (p)
1572  lp->temac_type = be32_to_cpup(p);
1573  p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1574  if (p)
1575  lp->phy_type = be32_to_cpup(p);
1576 
1577  /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1578  np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1579  if (!np) {
1580  dev_err(&op->dev, "could not find DMA node\n");
1581  goto err_iounmap;
1582  }
1583  lp->dma_regs = of_iomap(np, 0);
1584  if (lp->dma_regs) {
1585  dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
1586  } else {
1587  dev_err(&op->dev, "unable to map DMA registers\n");
1588  of_node_put(np);
1589  }
1590  lp->rx_irq = irq_of_parse_and_map(np, 1);
1591  lp->tx_irq = irq_of_parse_and_map(np, 0);
1592  of_node_put(np);
1593  if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
1594  dev_err(&op->dev, "could not determine irqs\n");
1595  ret = -ENOMEM;
1596  goto err_iounmap_2;
1597  }
1598 
1599  /* Retrieve the MAC address */
1600  addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1601  if ((!addr) || (size != 6)) {
1602  dev_err(&op->dev, "could not find MAC address\n");
1603  ret = -ENODEV;
1604  goto err_iounmap_2;
1605  }
1606  axienet_set_mac_address(ndev, (void *) addr);
1607 
1608  lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1609  lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1610 
1611  lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1612  ret = axienet_mdio_setup(lp, op->dev.of_node);
1613  if (ret)
1614  dev_warn(&op->dev, "error registering MDIO bus\n");
1615 
1616  ret = register_netdev(lp->ndev);
1617  if (ret) {
1618  dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1619  goto err_iounmap_2;
1620  }
1621 
1622  return 0;
1623 
1624 err_iounmap_2:
1625  if (lp->dma_regs)
1626  iounmap(lp->dma_regs);
1627 err_iounmap:
1628  iounmap(lp->regs);
1629 nodev:
1630  free_netdev(ndev);
1631  ndev = NULL;
1632  return ret;
1633 }
1634 
1635 static int __devexit axienet_of_remove(struct platform_device *op)
1636 {
1637  struct net_device *ndev = dev_get_drvdata(&op->dev);
1638  struct axienet_local *lp = netdev_priv(ndev);
1639 
1640  axienet_mdio_teardown(lp);
1641  unregister_netdev(ndev);
1642 
1643  if (lp->phy_node)
1644  of_node_put(lp->phy_node);
1645  lp->phy_node = NULL;
1646 
1647  dev_set_drvdata(&op->dev, NULL);
1648 
1649  iounmap(lp->regs);
1650  if (lp->dma_regs)
1651  iounmap(lp->dma_regs);
1652  free_netdev(ndev);
1653 
1654  return 0;
1655 }
1656 
1657 static struct platform_driver axienet_of_driver = {
1658  .probe = axienet_of_probe,
1659  .remove = __devexit_p(axienet_of_remove),
1660  .driver = {
1661  .owner = THIS_MODULE,
1662  .name = "xilinx_axienet",
1663  .of_match_table = axienet_of_match,
1664  },
1665 };
1666 
1667 module_platform_driver(axienet_of_driver);
1668 
1669 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1670 MODULE_AUTHOR("Xilinx");
1671 MODULE_LICENSE("GPL");