Linux Kernel 3.7.1

cpmac.c
1 /*
2  * Copyright (C) 2006, 2007 Eugene Konev
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/moduleparam.h>
23 
24 #include <linux/sched.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/errno.h>
28 #include <linux/types.h>
29 #include <linux/delay.h>
30 
31 #include <linux/netdevice.h>
32 #include <linux/if_vlan.h>
33 #include <linux/etherdevice.h>
34 #include <linux/ethtool.h>
35 #include <linux/skbuff.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/phy_fixed.h>
39 #include <linux/platform_device.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/clk.h>
42 #include <linux/gpio.h>
43 #include <linux/atomic.h>
44 
45 MODULE_AUTHOR("Eugene Konev <[email protected]>");
46 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
47 MODULE_LICENSE("GPL");
48 MODULE_ALIAS("platform:cpmac");
49 
50 static int debug_level = 8;
51 static int dumb_switch;
52 
53 /* Next 2 are only used in cpmac_probe, so it's pointless to change them */
54 module_param(debug_level, int, 0444);
55 module_param(dumb_switch, int, 0444);
56 
57 MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
58 MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
59 
60 #define CPMAC_VERSION "0.5.2"
61 /* frame size + 802.1q tag + FCS size */
62 #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
63 #define CPMAC_QUEUES 8
64 
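For reference, with the usual <linux/if_ether.h> and <linux/if_vlan.h> values (ETH_FRAME_LEN 1514, ETH_FCS_LEN 4, VLAN_HLEN 4), CPMAC_SKB_SIZE works out to 1522 bytes. A trivial standalone check, with those header values inlined here as assumptions:

    #include <stdio.h>

    /* Assumed values from <linux/if_ether.h> and <linux/if_vlan.h>. */
    #define ETH_FRAME_LEN 1514
    #define ETH_FCS_LEN   4
    #define VLAN_HLEN     4

    int main(void)
    {
            /* frame + FCS + 802.1q tag, as in the CPMAC_SKB_SIZE define */
            printf("CPMAC_SKB_SIZE = %d\n",
                   ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN); /* 1522 */
            return 0;
    }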
65 /* Ethernet registers */
66 #define CPMAC_TX_CONTROL 0x0004
67 #define CPMAC_TX_TEARDOWN 0x0008
68 #define CPMAC_RX_CONTROL 0x0014
69 #define CPMAC_RX_TEARDOWN 0x0018
70 #define CPMAC_MBP 0x0100
71 # define MBP_RXPASSCRC 0x40000000
72 # define MBP_RXQOS 0x20000000
73 # define MBP_RXNOCHAIN 0x10000000
74 # define MBP_RXCMF 0x01000000
75 # define MBP_RXSHORT 0x00800000
76 # define MBP_RXCEF 0x00400000
77 # define MBP_RXPROMISC 0x00200000
78 # define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
79 # define MBP_RXBCAST 0x00002000
80 # define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
81 # define MBP_RXMCAST 0x00000020
82 # define MBP_MCASTCHAN(channel) ((channel) & 0x7)
83 #define CPMAC_UNICAST_ENABLE 0x0104
84 #define CPMAC_UNICAST_CLEAR 0x0108
85 #define CPMAC_MAX_LENGTH 0x010c
86 #define CPMAC_BUFFER_OFFSET 0x0110
87 #define CPMAC_MAC_CONTROL 0x0160
88 # define MAC_TXPTYPE 0x00000200
89 # define MAC_TXPACE 0x00000040
90 # define MAC_MII 0x00000020
91 # define MAC_TXFLOW 0x00000010
92 # define MAC_RXFLOW 0x00000008
93 # define MAC_MTEST 0x00000004
94 # define MAC_LOOPBACK 0x00000002
95 # define MAC_FDX 0x00000001
96 #define CPMAC_MAC_STATUS 0x0164
97 # define MAC_STATUS_QOS 0x00000004
98 # define MAC_STATUS_RXFLOW 0x00000002
99 # define MAC_STATUS_TXFLOW 0x00000001
100 #define CPMAC_TX_INT_ENABLE 0x0178
101 #define CPMAC_TX_INT_CLEAR 0x017c
102 #define CPMAC_MAC_INT_VECTOR 0x0180
103 # define MAC_INT_STATUS 0x00080000
104 # define MAC_INT_HOST 0x00040000
105 # define MAC_INT_RX 0x00020000
106 # define MAC_INT_TX 0x00010000
107 #define CPMAC_MAC_EOI_VECTOR 0x0184
108 #define CPMAC_RX_INT_ENABLE 0x0198
109 #define CPMAC_RX_INT_CLEAR 0x019c
110 #define CPMAC_MAC_INT_ENABLE 0x01a8
111 #define CPMAC_MAC_INT_CLEAR 0x01ac
112 #define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
113 #define CPMAC_MAC_ADDR_MID 0x01d0
114 #define CPMAC_MAC_ADDR_HI 0x01d4
115 #define CPMAC_MAC_HASH_LO 0x01d8
116 #define CPMAC_MAC_HASH_HI 0x01dc
117 #define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
118 #define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
119 #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
120 #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
121 #define CPMAC_REG_END 0x0680
122 /*
123  * Rx/Tx statistics
124  * TODO: use some of them to fill stats in cpmac_stats()
125  */
126 #define CPMAC_STATS_RX_GOOD 0x0200
127 #define CPMAC_STATS_RX_BCAST 0x0204
128 #define CPMAC_STATS_RX_MCAST 0x0208
129 #define CPMAC_STATS_RX_PAUSE 0x020c
130 #define CPMAC_STATS_RX_CRC 0x0210
131 #define CPMAC_STATS_RX_ALIGN 0x0214
132 #define CPMAC_STATS_RX_OVER 0x0218
133 #define CPMAC_STATS_RX_JABBER 0x021c
134 #define CPMAC_STATS_RX_UNDER 0x0220
135 #define CPMAC_STATS_RX_FRAG 0x0224
136 #define CPMAC_STATS_RX_FILTER 0x0228
137 #define CPMAC_STATS_RX_QOSFILTER 0x022c
138 #define CPMAC_STATS_RX_OCTETS 0x0230
139 
140 #define CPMAC_STATS_TX_GOOD 0x0234
141 #define CPMAC_STATS_TX_BCAST 0x0238
142 #define CPMAC_STATS_TX_MCAST 0x023c
143 #define CPMAC_STATS_TX_PAUSE 0x0240
144 #define CPMAC_STATS_TX_DEFER 0x0244
145 #define CPMAC_STATS_TX_COLLISION 0x0248
146 #define CPMAC_STATS_TX_SINGLECOLL 0x024c
147 #define CPMAC_STATS_TX_MULTICOLL 0x0250
148 #define CPMAC_STATS_TX_EXCESSCOLL 0x0254
149 #define CPMAC_STATS_TX_LATECOLL 0x0258
150 #define CPMAC_STATS_TX_UNDERRUN 0x025c
151 #define CPMAC_STATS_TX_CARRIERSENSE 0x0260
152 #define CPMAC_STATS_TX_OCTETS 0x0264
153 
154 #define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
155 #define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
156  (reg)))
157 
158 /* MDIO bus */
159 #define CPMAC_MDIO_VERSION 0x0000
160 #define CPMAC_MDIO_CONTROL 0x0004
161 # define MDIOC_IDLE 0x80000000
162 # define MDIOC_ENABLE 0x40000000
163 # define MDIOC_PREAMBLE 0x00100000
164 # define MDIOC_FAULT 0x00080000
165 # define MDIOC_FAULTDETECT 0x00040000
166 # define MDIOC_INTTEST 0x00020000
167 # define MDIOC_CLKDIV(div) ((div) & 0xff)
168 #define CPMAC_MDIO_ALIVE 0x0008
169 #define CPMAC_MDIO_LINK 0x000c
170 #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
171 # define MDIO_BUSY 0x80000000
172 # define MDIO_WRITE 0x40000000
173 # define MDIO_REG(reg) (((reg) & 0x1f) << 21)
174 # define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
175 # define MDIO_DATA(data) ((data) & 0xffff)
176 #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
177 # define PHYSEL_LINKSEL 0x00000040
178 # define PHYSEL_LINKINT 0x00000020
179 
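To make the MDIO_* field macros above concrete, here is a minimal userspace sketch that packs a read and a write command word the same way cpmac_mdio_read()/cpmac_mdio_write() below do; the PHY address, register numbers, and data value are arbitrary examples:

    #include <stdio.h>
    #include <stdint.h>

    /* Field macros copied from the listing above. */
    #define MDIO_BUSY        0x80000000u
    #define MDIO_WRITE       0x40000000u
    #define MDIO_REG(reg)    (((reg) & 0x1f) << 21)
    #define MDIO_PHY(phy)    (((phy) & 0x1f) << 16)
    #define MDIO_DATA(data)  ((data) & 0xffff)

    int main(void)
    {
            /* Word written to CPMAC_MDIO_ACCESS(0) to read register 1
             * (BMSR) of the PHY at address 0x1f. */
            uint32_t rd = MDIO_BUSY | MDIO_REG(1) | MDIO_PHY(0x1f);
            /* Same, but a write of 0x1200 to register 0 (BMCR). */
            uint32_t wr = MDIO_BUSY | MDIO_WRITE | MDIO_REG(0) |
                          MDIO_PHY(0x1f) | MDIO_DATA(0x1200);

            printf("read word:  0x%08x\n", (unsigned)rd); /* 0x803f0000 */
            printf("write word: 0x%08x\n", (unsigned)wr); /* 0xc01f1200 */
            return 0;
    }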
180 struct cpmac_desc {
181  u32 hw_next;
182  u32 hw_data;
183  u16 buflen;
184  u16 bufflags;
185  u16 datalen;
186  u16 dataflags;
187 #define CPMAC_SOP 0x8000
188 #define CPMAC_EOP 0x4000
189 #define CPMAC_OWN 0x2000
190 #define CPMAC_EOQ 0x1000
191  struct sk_buff *skb;
192  struct cpmac_desc *next;
193  struct cpmac_desc *prev;
194  dma_addr_t mapping;
195  dma_addr_t data_mapping;
196 };
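The first four members above (hw_next through dataflags) are the part of the descriptor the DMA engine actually reads; the rest is host-side bookkeeping. A hypothetical userspace mirror of just the hardware-visible head, to show the 16-byte layout:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Userspace mirror of the hardware-visible head of struct cpmac_desc;
     * the kernel struct continues with skb/next/prev/mapping fields. */
    struct cpmac_hw_desc {
            uint32_t hw_next;   /* physical address of next descriptor */
            uint32_t hw_data;   /* physical address of the data buffer */
            uint16_t buflen;
            uint16_t bufflags;
            uint16_t datalen;
            uint16_t dataflags; /* CPMAC_SOP/EOP/OWN/EOQ live here */
    };

    int main(void)
    {
            printf("hw descriptor: %zu bytes, dataflags at offset %zu\n",
                   sizeof(struct cpmac_hw_desc),
                   offsetof(struct cpmac_hw_desc, dataflags));
            return 0;
    }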
197 
198 struct cpmac_priv {
199  spinlock_t lock;
200  spinlock_t rx_lock;
201  struct cpmac_desc *rx_head;
202  int ring_size;
203  struct cpmac_desc *desc_ring;
204  dma_addr_t dma_ring;
205  void __iomem *regs;
206  struct mii_bus *mii_bus;
207  struct phy_device *phy;
208  char phy_name[MII_BUS_ID_SIZE + 3];
209  int oldlink, oldspeed, oldduplex;
210  u32 msg_enable;
211  struct net_device *dev;
212  struct work_struct reset_work;
213  struct platform_device *pdev;
214  struct napi_struct napi;
215  atomic_t reset_pending;
216 };
217 
218 static irqreturn_t cpmac_irq(int, void *);
219 static void cpmac_hw_start(struct net_device *dev);
220 static void cpmac_hw_stop(struct net_device *dev);
221 static int cpmac_stop(struct net_device *dev);
222 static int cpmac_open(struct net_device *dev);
223 
224 static void cpmac_dump_regs(struct net_device *dev)
225 {
226  int i;
227  struct cpmac_priv *priv = netdev_priv(dev);
228  for (i = 0; i < CPMAC_REG_END; i += 4) {
229  if (i % 16 == 0) {
230  if (i)
231  pr_cont("\n");
232  printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
233  priv->regs + i);
234  }
235  printk(" %08x", cpmac_read(priv->regs, i));
236  }
237  printk("\n");
238 }
239 
240 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
241 {
242  int i;
243  printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
244  for (i = 0; i < sizeof(*desc) / 4; i++)
245  printk(" %08x", ((u32 *)desc)[i]);
246  printk("\n");
247 }
248 
249 static void cpmac_dump_all_desc(struct net_device *dev)
250 {
251  struct cpmac_priv *priv = netdev_priv(dev);
252  struct cpmac_desc *dump = priv->rx_head;
253  do {
254  cpmac_dump_desc(dev, dump);
255  dump = dump->next;
256  } while (dump != priv->rx_head);
257 }
258 
259 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
260 {
261  int i;
262  printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
263  for (i = 0; i < skb->len; i++) {
264  if (i % 16 == 0) {
265  if (i)
266  pr_cont("\n");
267  printk(KERN_DEBUG "%s: data[%p]:", dev->name,
268  skb->data + i);
269  }
270  printk(" %02x", ((u8 *)skb->data)[i]);
271  }
272  printk("\n");
273 }
274 
275 static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
276 {
277  u32 val;
278 
279  while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
280  cpu_relax();
281  cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
282  MDIO_PHY(phy_id));
283  while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
284  cpu_relax();
285  return MDIO_DATA(val);
286 }
287 
288 static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
289  int reg, u16 val)
290 {
291  while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
292  cpu_relax();
293  cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
294  MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
295  return 0;
296 }
297 
298 static int cpmac_mdio_reset(struct mii_bus *bus)
299 {
300  struct clk *cpmac_clk;
301 
302  cpmac_clk = clk_get(&bus->dev, "cpmac");
303  if (IS_ERR(cpmac_clk)) {
304  printk(KERN_ERR "unable to get cpmac clock\n");
305  return -1;
306  }
307  ar7_device_reset(AR7_RESET_BIT_MDIO);
308  cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
309  MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
310  return 0;
311 }
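The MDIOC_CLKDIV value above aims the MDC clock at roughly 2.2 MHz. A one-line sketch of the arithmetic; the 125 MHz input rate is only an assumed example, the real rate comes from clk_get_rate():

    #include <stdio.h>

    int main(void)
    {
            unsigned long bus_hz = 125000000;        /* assumed cpmac clock */
            unsigned int div = bus_hz / 2200000 - 1; /* as in cpmac_mdio_reset */

            /* Resulting MDC rate: bus clock divided by (div + 1). */
            printf("div=%u -> MDC ~ %lu Hz\n", div, bus_hz / (div + 1));
            return 0;
    }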
312 
313 static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
314 
315 static struct mii_bus *cpmac_mii;
316 
317 static int cpmac_config(struct net_device *dev, struct ifmap *map)
318 {
319  if (dev->flags & IFF_UP)
320  return -EBUSY;
321 
322  /* Don't allow changing the I/O address */
323  if (map->base_addr != dev->base_addr)
324  return -EOPNOTSUPP;
325 
326  /* ignore other fields */
327  return 0;
328 }
329 
330 static void cpmac_set_multicast_list(struct net_device *dev)
331 {
332  struct netdev_hw_addr *ha;
333  u8 tmp;
334  u32 mbp, bit, hash[2] = { 0, };
335  struct cpmac_priv *priv = netdev_priv(dev);
336 
337  mbp = cpmac_read(priv->regs, CPMAC_MBP);
338  if (dev->flags & IFF_PROMISC) {
339  cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
340  MBP_RXPROMISC);
341  } else {
342  cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
343  if (dev->flags & IFF_ALLMULTI) {
344  /* enable all multicast mode */
345  cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
346  cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
347  } else {
348  /*
349  * cpmac uses some strange mac address hashing
350  * (not crc32)
351  */
352  netdev_for_each_mc_addr(ha, dev) {
353  bit = 0;
354  tmp = ha->addr[0];
355  bit ^= (tmp >> 2) ^ (tmp << 4);
356  tmp = ha->addr[1];
357  bit ^= (tmp >> 4) ^ (tmp << 2);
358  tmp = ha->addr[2];
359  bit ^= (tmp >> 6) ^ tmp;
360  tmp = ha->addr[3];
361  bit ^= (tmp >> 2) ^ (tmp << 4);
362  tmp = ha->addr[4];
363  bit ^= (tmp >> 4) ^ (tmp << 2);
364  tmp = ha->addr[5];
365  bit ^= (tmp >> 6) ^ tmp;
366  bit &= 0x3f;
367  hash[bit / 32] |= 1 << (bit % 32);
368  }
369 
370  cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
371  cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
372  }
373  }
374 }
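The XOR folding above maps each multicast address to one of 64 hash bits. The same computation as a self-contained userspace program (the example multicast address is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    /* Same bit folding as cpmac_set_multicast_list() (not crc32). */
    static unsigned int cpmac_hash(const uint8_t *addr)
    {
            unsigned int bit = 0;
            uint8_t tmp;

            tmp = addr[0]; bit ^= (tmp >> 2) ^ (tmp << 4);
            tmp = addr[1]; bit ^= (tmp >> 4) ^ (tmp << 2);
            tmp = addr[2]; bit ^= (tmp >> 6) ^ tmp;
            tmp = addr[3]; bit ^= (tmp >> 2) ^ (tmp << 4);
            tmp = addr[4]; bit ^= (tmp >> 4) ^ (tmp << 2);
            tmp = addr[5]; bit ^= (tmp >> 6) ^ tmp;
            return bit & 0x3f;
    }

    int main(void)
    {
            uint32_t hash[2] = { 0, 0 };
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            unsigned int bit = cpmac_hash(mc);

            hash[bit / 32] |= 1u << (bit % 32); /* bit 14 for this address */
            printf("bit=%u hash_lo=0x%08x hash_hi=0x%08x\n", bit,
                   (unsigned)hash[0], (unsigned)hash[1]);
            return 0;
    }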
375 
376 static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
377  struct cpmac_desc *desc)
378 {
379  struct sk_buff *skb, *result = NULL;
380 
381  if (unlikely(netif_msg_hw(priv)))
382  cpmac_dump_desc(priv->dev, desc);
383  cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
384  if (unlikely(!desc->datalen)) {
385  if (netif_msg_rx_err(priv) && net_ratelimit())
386  printk(KERN_WARNING "%s: rx: spurious interrupt\n",
387  priv->dev->name);
388  return NULL;
389  }
390 
391  skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
392  if (likely(skb)) {
393  skb_put(desc->skb, desc->datalen);
394  desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
395  skb_checksum_none_assert(desc->skb);
396  priv->dev->stats.rx_packets++;
397  priv->dev->stats.rx_bytes += desc->datalen;
398  result = desc->skb;
399  dma_unmap_single(&priv->dev->dev, desc->data_mapping,
400  CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
401  desc->skb = skb;
402  desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
403  CPMAC_SKB_SIZE,
404  DMA_FROM_DEVICE);
405  desc->hw_data = (u32)desc->data_mapping;
406  if (unlikely(netif_msg_pktdata(priv))) {
407  printk(KERN_DEBUG "%s: received packet:\n",
408  priv->dev->name);
409  cpmac_dump_skb(priv->dev, result);
410  }
411  } else {
412  if (netif_msg_rx_err(priv) && net_ratelimit())
413  printk(KERN_WARNING
414  "%s: low on skbs, dropping packet\n",
415  priv->dev->name);
416  priv->dev->stats.rx_dropped++;
417  }
418 
419  desc->buflen = CPMAC_SKB_SIZE;
420  desc->dataflags = CPMAC_OWN;
421 
422  return result;
423 }
424 
425 static int cpmac_poll(struct napi_struct *napi, int budget)
426 {
427  struct sk_buff *skb;
428  struct cpmac_desc *desc, *restart;
429  struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
430  int received = 0, processed = 0;
431 
432  spin_lock(&priv->rx_lock);
433  if (unlikely(!priv->rx_head)) {
434  if (netif_msg_rx_err(priv) && net_ratelimit())
435  printk(KERN_WARNING "%s: rx: polling, but no queue\n",
436  priv->dev->name);
437  spin_unlock(&priv->rx_lock);
438  napi_complete(napi);
439  return 0;
440  }
441 
442  desc = priv->rx_head;
443  restart = NULL;
444  while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
445  processed++;
446 
447  if ((desc->dataflags & CPMAC_EOQ) != 0) {
448  /* The last update to eoq->hw_next didn't happen
449  * soon enough, and the receiver stopped here.
450  * Remember this descriptor so we can restart
451  * the receiver after freeing some space.
452  */
453  if (unlikely(restart)) {
454  if (netif_msg_rx_err(priv))
455  printk(KERN_ERR "%s: poll found a"
456  " duplicate EOQ: %p and %p\n",
457  priv->dev->name, restart, desc);
458  goto fatal_error;
459  }
460 
461  restart = desc->next;
462  }
463 
464  skb = cpmac_rx_one(priv, desc);
465  if (likely(skb)) {
466  netif_receive_skb(skb);
467  received++;
468  }
469  desc = desc->next;
470  }
471 
472  if (desc != priv->rx_head) {
473  /* We freed some buffers, but not the whole ring,
474  * add what we did free to the rx list */
475  desc->prev->hw_next = (u32)0;
476  priv->rx_head->prev->hw_next = priv->rx_head->mapping;
477  }
478 
479  /* Optimization: If we did not actually process an EOQ (perhaps because
480  * of quota limits), check to see if the tail of the queue has EOQ set.
481  * We should immediately restart in that case so that the receiver can
482  * restart and run in parallel with more packet processing.
483  * This lets us handle slightly larger bursts before running
484  * out of ring space (assuming dev->weight < ring_size) */
485 
486  if (!restart &&
487  (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
488  == CPMAC_EOQ &&
489  (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
490  /* reset EOQ so the poll loop (above) doesn't try to
491  * restart this when it eventually gets to this descriptor.
492  */
493  priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
494  restart = priv->rx_head;
495  }
496 
497  if (restart) {
498  priv->dev->stats.rx_errors++;
499  priv->dev->stats.rx_fifo_errors++;
500  if (netif_msg_rx_err(priv) && net_ratelimit())
501  printk(KERN_WARNING "%s: rx dma ring overrun\n",
502  priv->dev->name);
503 
504  if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
505  if (netif_msg_drv(priv))
506  printk(KERN_ERR "%s: cpmac_poll is trying to "
507  "restart rx from a descriptor that's "
508  "not free: %p\n",
509  priv->dev->name, restart);
510  goto fatal_error;
511  }
512 
513  cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
514  }
515 
516  priv->rx_head = desc;
517  spin_unlock(&priv->rx_lock);
518  if (unlikely(netif_msg_rx_status(priv)))
519  printk(KERN_DEBUG "%s: poll processed %d packets\n",
520  priv->dev->name, received);
521  if (processed == 0) {
522  /* we ran out of packets to read,
523  * revert to interrupt-driven mode */
524  napi_complete(napi);
525  cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
526  return 0;
527  }
528 
529  return 1;
530 
531 fatal_error:
532  /* Something went horribly wrong.
533  * Reset hardware to try to recover rather than wedging. */
534 
535  if (netif_msg_drv(priv)) {
536  printk(KERN_ERR "%s: cpmac_poll is confused. "
537  "Resetting hardware\n", priv->dev->name);
538  cpmac_dump_all_desc(priv->dev);
539  printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
540  priv->dev->name,
541  cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
542  cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
543  }
544 
545  spin_unlock(&priv->rx_lock);
546  napi_complete(napi);
547  netif_tx_stop_all_queues(priv->dev);
548  napi_disable(&priv->napi);
549 
550  atomic_inc(&priv->reset_pending);
551  cpmac_hw_stop(priv->dev);
552  if (!schedule_work(&priv->reset_work))
553  atomic_dec(&priv->reset_pending);
554  return 0;
555 
556 }
557 
558 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
559 {
560  int queue, len;
561  struct cpmac_desc *desc;
562  struct cpmac_priv *priv = netdev_priv(dev);
563 
564  if (unlikely(atomic_read(&priv->reset_pending)))
565  return NETDEV_TX_BUSY;
566 
567  if (unlikely(skb_padto(skb, ETH_ZLEN)))
568  return NETDEV_TX_OK;
569 
570  len = max(skb->len, ETH_ZLEN);
571  queue = skb_get_queue_mapping(skb);
572  netif_stop_subqueue(dev, queue);
573 
574  desc = &priv->desc_ring[queue];
575  if (unlikely(desc->dataflags & CPMAC_OWN)) {
576  if (netif_msg_tx_err(priv) && net_ratelimit())
577  printk(KERN_WARNING "%s: tx dma ring full\n",
578  dev->name);
579  return NETDEV_TX_BUSY;
580  }
581 
582  spin_lock(&priv->lock);
583  spin_unlock(&priv->lock);
584  desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
585  desc->skb = skb;
586  desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
587  DMA_TO_DEVICE);
588  desc->hw_data = (u32)desc->data_mapping;
589  desc->datalen = len;
590  desc->buflen = len;
591  if (unlikely(netif_msg_tx_queued(priv)))
592  printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
593  skb->len);
594  if (unlikely(netif_msg_hw(priv)))
595  cpmac_dump_desc(dev, desc);
596  if (unlikely(netif_msg_pktdata(priv)))
597  cpmac_dump_skb(dev, skb);
598  cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
599 
600  return NETDEV_TX_OK;
601 }
602 
603 static void cpmac_end_xmit(struct net_device *dev, int queue)
604 {
605  struct cpmac_desc *desc;
606  struct cpmac_priv *priv = netdev_priv(dev);
607 
608  desc = &priv->desc_ring[queue];
609  cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
610  if (likely(desc->skb)) {
611  spin_lock(&priv->lock);
612  dev->stats.tx_packets++;
613  dev->stats.tx_bytes += desc->skb->len;
614  spin_unlock(&priv->lock);
615  dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
616  DMA_TO_DEVICE);
617 
618  if (unlikely(netif_msg_tx_done(priv)))
619  printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
620  desc->skb, desc->skb->len);
621 
622  dev_kfree_skb_irq(desc->skb);
623  desc->skb = NULL;
624  if (__netif_subqueue_stopped(dev, queue))
625  netif_wake_subqueue(dev, queue);
626  } else {
627  if (netif_msg_tx_err(priv) && net_ratelimit())
628  printk(KERN_WARNING
629  "%s: end_xmit: spurious interrupt\n", dev->name);
630  if (__netif_subqueue_stopped(dev, queue))
631  netif_wake_subqueue(dev, queue);
632  }
633 }
634 
635 static void cpmac_hw_stop(struct net_device *dev)
636 {
637  int i;
638  struct cpmac_priv *priv = netdev_priv(dev);
639  struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
640 
641  ar7_device_reset(pdata->reset_bit);
642  cpmac_write(priv->regs, CPMAC_RX_CONTROL,
643  cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
644  cpmac_write(priv->regs, CPMAC_TX_CONTROL,
645  cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
646  for (i = 0; i < 8; i++) {
647  cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
648  cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
649  }
650  cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
651  cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
652  cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
653  cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
654  cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
655  cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
656 }
657 
658 static void cpmac_hw_start(struct net_device *dev)
659 {
660  int i;
661  struct cpmac_priv *priv = netdev_priv(dev);
662  struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
663 
664  ar7_device_reset(pdata->reset_bit);
665  for (i = 0; i < 8; i++) {
666  cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
667  cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
668  }
669  cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
670
671  cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
672  MBP_RXMCAST);
673  cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
674  for (i = 0; i < 8; i++)
675  cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
676  cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
677  cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
678  (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
679  (dev->dev_addr[3] << 24));
680  cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
681  cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
682  cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
683  cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
684  cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
685  cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
686  cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
687  cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
688  cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
689
690  cpmac_write(priv->regs, CPMAC_RX_CONTROL,
691  cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
692  cpmac_write(priv->regs, CPMAC_TX_CONTROL,
693  cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
694  cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
695  cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
696  MAC_FDX);
697 }
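Note how cpmac_hw_start() scatters the station address: byte 5 goes to every per-channel ADDR_LO register, byte 4 to ADDR_MID, and bytes 0-3 are packed into ADDR_HI. A standalone sketch of that packing, assuming an example address:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            uint32_t lo  = a[5];                /* CPMAC_MAC_ADDR_LO(chan) */
            uint32_t mid = a[4];                /* CPMAC_MAC_ADDR_MID */
            uint32_t hi  = a[0] | (a[1] << 8) | /* CPMAC_MAC_ADDR_HI */
                           (a[2] << 16) | ((uint32_t)a[3] << 24);

            printf("lo=0x%02x mid=0x%02x hi=0x%08x\n",
                   (unsigned)lo, (unsigned)mid, (unsigned)hi); /* hi=0x33221100 */
            return 0;
    }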
698 
699 static void cpmac_clear_rx(struct net_device *dev)
700 {
701  struct cpmac_priv *priv = netdev_priv(dev);
702  struct cpmac_desc *desc;
703  int i;
704  if (unlikely(!priv->rx_head))
705  return;
706  desc = priv->rx_head;
707  for (i = 0; i < priv->ring_size; i++) {
708  if ((desc->dataflags & CPMAC_OWN) == 0) {
709  if (netif_msg_rx_err(priv) && net_ratelimit())
710  printk(KERN_WARNING "%s: packet dropped\n",
711  dev->name);
712  if (unlikely(netif_msg_hw(priv)))
713  cpmac_dump_desc(dev, desc);
714  desc->dataflags = CPMAC_OWN;
715  dev->stats.rx_dropped++;
716  }
717  desc->hw_next = desc->next->mapping;
718  desc = desc->next;
719  }
720  priv->rx_head->prev->hw_next = 0;
721 }
722 
723 static void cpmac_clear_tx(struct net_device *dev)
724 {
725  struct cpmac_priv *priv = netdev_priv(dev);
726  int i;
727  if (unlikely(!priv->desc_ring))
728  return;
729  for (i = 0; i < CPMAC_QUEUES; i++) {
730  priv->desc_ring[i].dataflags = 0;
731  if (priv->desc_ring[i].skb) {
732  dev_kfree_skb_any(priv->desc_ring[i].skb);
733  priv->desc_ring[i].skb = NULL;
734  }
735  }
736 }
737 
738 static void cpmac_hw_error(struct work_struct *work)
739 {
740  struct cpmac_priv *priv =
741  container_of(work, struct cpmac_priv, reset_work);
742 
743  spin_lock(&priv->rx_lock);
744  cpmac_clear_rx(priv->dev);
745  spin_unlock(&priv->rx_lock);
746  cpmac_clear_tx(priv->dev);
747  cpmac_hw_start(priv->dev);
748  barrier();
749  atomic_dec(&priv->reset_pending);
750 
751  netif_tx_wake_all_queues(priv->dev);
752  cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
753 }
754 
755 static void cpmac_check_status(struct net_device *dev)
756 {
757  struct cpmac_priv *priv = netdev_priv(dev);
758 
759  u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
760  int rx_channel = (macstatus >> 8) & 7;
761  int rx_code = (macstatus >> 12) & 15;
762  int tx_channel = (macstatus >> 16) & 7;
763  int tx_code = (macstatus >> 20) & 15;
764 
765  if (rx_code || tx_code) {
766  if (netif_msg_drv(priv) && net_ratelimit()) {
767  /* Can't find any documentation on what these
768  * error codes actually are. So just log them and hope...
769  */
770  if (rx_code)
771  printk(KERN_WARNING "%s: host error %d on rx "
772  "channel %d (macstatus %08x), resetting\n",
773  dev->name, rx_code, rx_channel, macstatus);
774  if (tx_code)
775  printk(KERN_WARNING "%s: host error %d on tx "
776  "channel %d (macstatus %08x), resetting\n",
777  dev->name, tx_code, tx_channel, macstatus);
778  }
779 
780  netif_tx_stop_all_queues(dev);
781  cpmac_hw_stop(dev);
782  if (schedule_work(&priv->reset_work))
783  atomic_inc(&priv->reset_pending);
784  if (unlikely(netif_msg_hw(priv)))
785  cpmac_dump_regs(dev);
786  }
787  cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
788 }
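A standalone version of the field extraction used by cpmac_check_status(), with an arbitrary example status word; the shift and mask values are taken directly from the function above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t macstatus = 0x00235700; /* arbitrary example value */

            int rx_channel = (macstatus >> 8) & 7;
            int rx_code    = (macstatus >> 12) & 15;
            int tx_channel = (macstatus >> 16) & 7;
            int tx_code    = (macstatus >> 20) & 15;

            printf("rx: code %d on channel %d, tx: code %d on channel %d\n",
                   rx_code, rx_channel, tx_code, tx_channel);
            return 0;
    }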
789 
790 static irqreturn_t cpmac_irq(int irq, void *dev_id)
791 {
792  struct net_device *dev = dev_id;
793  struct cpmac_priv *priv;
794  int queue;
795  u32 status;
796 
797  priv = netdev_priv(dev);
798 
799  status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
800 
801  if (unlikely(netif_msg_intr(priv)))
802  printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
803  status);
804 
805  if (status & MAC_INT_TX)
806  cpmac_end_xmit(dev, (status & 7));
807 
808  if (status & MAC_INT_RX) {
809  queue = (status >> 8) & 7;
810  if (napi_schedule_prep(&priv->napi)) {
811  cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
812  __napi_schedule(&priv->napi);
813  }
814  }
815 
816  cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
817
818  if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
819  cpmac_check_status(dev);
820 
821  return IRQ_HANDLED;
822 }
823 
824 static void cpmac_tx_timeout(struct net_device *dev)
825 {
826  struct cpmac_priv *priv = netdev_priv(dev);
827 
828  spin_lock(&priv->lock);
829  dev->stats.tx_errors++;
830  spin_unlock(&priv->lock);
831  if (netif_msg_tx_err(priv) && net_ratelimit())
832  printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
833 
834  atomic_inc(&priv->reset_pending);
835  barrier();
836  cpmac_clear_tx(dev);
837  barrier();
838  atomic_dec(&priv->reset_pending);
839 
840  netif_tx_wake_all_queues(priv->dev);
841 }
842 
843 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
844 {
845  struct cpmac_priv *priv = netdev_priv(dev);
846  if (!(netif_running(dev)))
847  return -EINVAL;
848  if (!priv->phy)
849  return -EINVAL;
850 
851  return phy_mii_ioctl(priv->phy, ifr, cmd);
852 }
853 
854 static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
855 {
856  struct cpmac_priv *priv = netdev_priv(dev);
857 
858  if (priv->phy)
859  return phy_ethtool_gset(priv->phy, cmd);
860 
861  return -EINVAL;
862 }
863 
864 static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
865 {
866  struct cpmac_priv *priv = netdev_priv(dev);
867 
868  if (!capable(CAP_NET_ADMIN))
869  return -EPERM;
870 
871  if (priv->phy)
872  return phy_ethtool_sset(priv->phy, cmd);
873 
874  return -EINVAL;
875 }
876 
877 static void cpmac_get_ringparam(struct net_device *dev,
878  struct ethtool_ringparam *ring)
879 {
880  struct cpmac_priv *priv = netdev_priv(dev);
881 
882  ring->rx_max_pending = 1024;
883  ring->rx_mini_max_pending = 1;
884  ring->rx_jumbo_max_pending = 1;
885  ring->tx_max_pending = 1;
886 
887  ring->rx_pending = priv->ring_size;
888  ring->rx_mini_pending = 1;
889  ring->rx_jumbo_pending = 1;
890  ring->tx_pending = 1;
891 }
892 
893 static int cpmac_set_ringparam(struct net_device *dev,
894  struct ethtool_ringparam *ring)
895 {
896  struct cpmac_priv *priv = netdev_priv(dev);
897 
898  if (netif_running(dev))
899  return -EBUSY;
900  priv->ring_size = ring->rx_pending;
901  return 0;
902 }
903 
904 static void cpmac_get_drvinfo(struct net_device *dev,
905  struct ethtool_drvinfo *info)
906 {
907  strcpy(info->driver, "cpmac");
908  strcpy(info->version, CPMAC_VERSION);
909  info->fw_version[0] = '\0';
910  sprintf(info->bus_info, "%s", "cpmac");
911  info->regdump_len = 0;
912 }
913 
914 static const struct ethtool_ops cpmac_ethtool_ops = {
915  .get_settings = cpmac_get_settings,
916  .set_settings = cpmac_set_settings,
917  .get_drvinfo = cpmac_get_drvinfo,
918  .get_link = ethtool_op_get_link,
919  .get_ringparam = cpmac_get_ringparam,
920  .set_ringparam = cpmac_set_ringparam,
921 };
922 
923 static void cpmac_adjust_link(struct net_device *dev)
924 {
925  struct cpmac_priv *priv = netdev_priv(dev);
926  int new_state = 0;
927 
928  spin_lock(&priv->lock);
929  if (priv->phy->link) {
930  netif_tx_start_all_queues(dev);
931  if (priv->phy->duplex != priv->oldduplex) {
932  new_state = 1;
933  priv->oldduplex = priv->phy->duplex;
934  }
935 
936  if (priv->phy->speed != priv->oldspeed) {
937  new_state = 1;
938  priv->oldspeed = priv->phy->speed;
939  }
940 
941  if (!priv->oldlink) {
942  new_state = 1;
943  priv->oldlink = 1;
944  }
945  } else if (priv->oldlink) {
946  new_state = 1;
947  priv->oldlink = 0;
948  priv->oldspeed = 0;
949  priv->oldduplex = -1;
950  }
951 
952  if (new_state && netif_msg_link(priv) && net_ratelimit())
953  phy_print_status(priv->phy);
954 
955  spin_unlock(&priv->lock);
956 }
957 
958 static int cpmac_open(struct net_device *dev)
959 {
960  int i, size, res;
961  struct cpmac_priv *priv = netdev_priv(dev);
962  struct resource *mem;
963  struct cpmac_desc *desc;
964  struct sk_buff *skb;
965 
966  mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
967  if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
968  if (netif_msg_drv(priv))
969  printk(KERN_ERR "%s: failed to request registers\n",
970  dev->name);
971  res = -ENXIO;
972  goto fail_reserve;
973  }
974 
975  priv->regs = ioremap(mem->start, resource_size(mem));
976  if (!priv->regs) {
977  if (netif_msg_drv(priv))
978  printk(KERN_ERR "%s: failed to remap registers\n",
979  dev->name);
980  res = -ENXIO;
981  goto fail_remap;
982  }
983 
984  size = priv->ring_size + CPMAC_QUEUES;
985  priv->desc_ring = dma_alloc_coherent(&dev->dev,
986  sizeof(struct cpmac_desc) * size,
987  &priv->dma_ring,
988  GFP_KERNEL);
989  if (!priv->desc_ring) {
990  res = -ENOMEM;
991  goto fail_alloc;
992  }
993 
994  for (i = 0; i < size; i++)
995  priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
996 
997  priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
998  for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
999  skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
1000  if (unlikely(!skb)) {
1001  res = -ENOMEM;
1002  goto fail_desc;
1003  }
1004  desc->skb = skb;
1005  desc->data_mapping = dma_map_single(&dev->dev, skb->data,
1006  CPMAC_SKB_SIZE,
1007  DMA_FROM_DEVICE);
1008  desc->hw_data = (u32)desc->data_mapping;
1009  desc->buflen = CPMAC_SKB_SIZE;
1010  desc->dataflags = CPMAC_OWN;
1011  desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
1012  desc->next->prev = desc;
1013  desc->hw_next = (u32)desc->next->mapping;
1014  }
1015 
1016  priv->rx_head->prev->hw_next = (u32)0;
1017 
1018  res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
1019  if (res) {
1020  if (netif_msg_drv(priv))
1021  printk(KERN_ERR "%s: failed to obtain irq\n",
1022  dev->name);
1023  goto fail_irq;
1024  }
1025 
1026  atomic_set(&priv->reset_pending, 0);
1027  INIT_WORK(&priv->reset_work, cpmac_hw_error);
1028  cpmac_hw_start(dev);
1029 
1030  napi_enable(&priv->napi);
1031  priv->phy->state = PHY_CHANGELINK;
1032  phy_start(priv->phy);
1033 
1034  return 0;
1035 
1036 fail_irq:
1037 fail_desc:
1038  for (i = 0; i < priv->ring_size; i++) {
1039  if (priv->rx_head[i].skb) {
1040  dma_unmap_single(&dev->dev,
1041  priv->rx_head[i].data_mapping,
1042  CPMAC_SKB_SIZE,
1043  DMA_FROM_DEVICE);
1044  kfree_skb(priv->rx_head[i].skb);
1045  }
1046  }
1047 fail_alloc:
1048  dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, priv->desc_ring, priv->dma_ring);
1049  iounmap(priv->regs);
1050 
1051 fail_remap:
1052  release_mem_region(mem->start, resource_size(mem));
1053 
1054 fail_reserve:
1055  return res;
1056 }
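The rx setup loop in cpmac_open() closes the descriptor list into a ring via the (i + 1) % ring_size index. A minimal userspace sketch of the same next/prev wiring, with RING_SIZE standing in for priv->ring_size:

    #include <stdio.h>

    #define RING_SIZE 8 /* stand-in for priv->ring_size */

    struct desc {
            struct desc *next;
            struct desc *prev;
    };

    int main(void)
    {
            struct desc ring[RING_SIZE];
            struct desc *d;
            int i;

            /* Same linkage as the rx loop in cpmac_open(). */
            for (i = 0; i < RING_SIZE; i++) {
                    ring[i].next = &ring[(i + 1) % RING_SIZE];
                    ring[i].next->prev = &ring[i];
            }

            /* The circle closes: RING_SIZE steps return to the head. */
            d = &ring[0];
            for (i = 0; i < RING_SIZE; i++)
                    d = d->next;
            printf("back at head: %s\n", d == &ring[0] ? "yes" : "no");
            return 0;
    }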
1057 
1058 static int cpmac_stop(struct net_device *dev)
1059 {
1060  int i;
1061  struct cpmac_priv *priv = netdev_priv(dev);
1062  struct resource *mem;
1063 
1064  netif_tx_stop_all_queues(dev);
1065 
1066  cancel_work_sync(&priv->reset_work);
1067  napi_disable(&priv->napi);
1068  phy_stop(priv->phy);
1069 
1070  cpmac_hw_stop(dev);
1071 
1072  for (i = 0; i < 8; i++)
1073  cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
1074  cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
1075  cpmac_write(priv->regs, CPMAC_MBP, 0);
1076 
1077  free_irq(dev->irq, dev);
1078  iounmap(priv->regs);
1079  mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
1080  release_mem_region(mem->start, resource_size(mem));
1081  priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
1082  for (i = 0; i < priv->ring_size; i++) {
1083  if (priv->rx_head[i].skb) {
1084  dma_unmap_single(&dev->dev,
1085  priv->rx_head[i].data_mapping,
1086  CPMAC_SKB_SIZE,
1087  DMA_FROM_DEVICE);
1088  kfree_skb(priv->rx_head[i].skb);
1089  }
1090  }
1091 
1092  dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
1093  (CPMAC_QUEUES + priv->ring_size),
1094  priv->desc_ring, priv->dma_ring);
1095  return 0;
1096 }
1097 
1098 static const struct net_device_ops cpmac_netdev_ops = {
1099  .ndo_open = cpmac_open,
1100  .ndo_stop = cpmac_stop,
1101  .ndo_start_xmit = cpmac_start_xmit,
1102  .ndo_tx_timeout = cpmac_tx_timeout,
1103  .ndo_set_rx_mode = cpmac_set_multicast_list,
1104  .ndo_do_ioctl = cpmac_ioctl,
1105  .ndo_set_config = cpmac_config,
1106  .ndo_change_mtu = eth_change_mtu,
1107  .ndo_validate_addr = eth_validate_addr,
1108  .ndo_set_mac_address = eth_mac_addr,
1109 };
1110 
1111 static int external_switch;
1112 
1113 static int __devinit cpmac_probe(struct platform_device *pdev)
1114 {
1115  int rc, phy_id;
1116  char mdio_bus_id[MII_BUS_ID_SIZE];
1117  struct resource *mem;
1118  struct cpmac_priv *priv;
1119  struct net_device *dev;
1120  struct plat_cpmac_data *pdata;
1121 
1122  pdata = pdev->dev.platform_data;
1123 
1124  if (external_switch || dumb_switch) {
1125  strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1126  phy_id = pdev->id;
1127  } else {
1128  for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1129  if (!(pdata->phy_mask & (1 << phy_id)))
1130  continue;
1131  if (!cpmac_mii->phy_map[phy_id])
1132  continue;
1133  strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
1134  break;
1135  }
1136  }
1137 
1138  if (phy_id == PHY_MAX_ADDR) {
1139  dev_err(&pdev->dev, "no PHY present, falling back "
1140  "to switch on MDIO bus 0\n");
1141  strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
1142  phy_id = pdev->id;
1143  }
1144 
1145  dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
1146  if (!dev)
1147  return -ENOMEM;
1148 
1149  platform_set_drvdata(pdev, dev);
1150  priv = netdev_priv(dev);
1151 
1152  priv->pdev = pdev;
1153  mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1154  if (!mem) {
1155  rc = -ENODEV;
1156  goto fail;
1157  }
1158 
1159  dev->irq = platform_get_irq_byname(pdev, "irq");
1160 
1161  dev->netdev_ops = &cpmac_netdev_ops;
1162  dev->ethtool_ops = &cpmac_ethtool_ops;
1163 
1164  netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
1165 
1166  spin_lock_init(&priv->lock);
1167  spin_lock_init(&priv->rx_lock);
1168  priv->dev = dev;
1169  priv->ring_size = 64;
1170  priv->msg_enable = netif_msg_init(debug_level, 0xff);
1171  memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
1172 
1173  snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1174  mdio_bus_id, phy_id);
1175 
1176  priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
1177  PHY_INTERFACE_MODE_MII);
1178
1179  if (IS_ERR(priv->phy)) {
1180  if (netif_msg_drv(priv))
1181  printk(KERN_ERR "%s: Could not attach to PHY\n",
1182  dev->name);
1183  rc = PTR_ERR(priv->phy);
1184  goto fail;
1185  }
1186 
1187  rc = register_netdev(dev);
1188  if (rc) {
1189  printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
1190  dev->name);
1191  goto fail;
1192  }
1193 
1194  if (netif_msg_probe(priv)) {
1195  printk(KERN_INFO
1196  "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
1197  "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
1198  priv->phy_name, dev->dev_addr);
1199  }
1200  return 0;
1201 
1202 fail:
1203  free_netdev(dev);
1204  return rc;
1205 }
1206 
1207 static int __devexit cpmac_remove(struct platform_device *pdev)
1208 {
1209  struct net_device *dev = platform_get_drvdata(pdev);
1210  unregister_netdev(dev);
1211  free_netdev(dev);
1212  return 0;
1213 }
1214 
1215 static struct platform_driver cpmac_driver = {
1216  .driver.name = "cpmac",
1217  .driver.owner = THIS_MODULE,
1218  .probe = cpmac_probe,
1219  .remove = __devexit_p(cpmac_remove),
1220 };
1221 
1222 int __devinit cpmac_init(void)
1223 {
1224  u32 mask;
1225  int i, res;
1226 
1227  cpmac_mii = mdiobus_alloc();
1228  if (cpmac_mii == NULL)
1229  return -ENOMEM;
1230 
1231  cpmac_mii->name = "cpmac-mii";
1232  cpmac_mii->read = cpmac_mdio_read;
1233  cpmac_mii->write = cpmac_mdio_write;
1234  cpmac_mii->reset = cpmac_mdio_reset;
1235  cpmac_mii->irq = mii_irqs;
1236 
1237  cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
1238 
1239  if (!cpmac_mii->priv) {
1240  printk(KERN_ERR "Can't ioremap mdio registers\n");
1241  res = -ENXIO;
1242  goto fail_alloc;
1243  }
1244 
1245 #warning FIXME: unhardcode gpio&reset bits
1246  ar7_gpio_disable(26);
1247  ar7_gpio_disable(27);
1248  ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
1249  ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
1250  ar7_device_reset(AR7_RESET_BIT_EPHY);
1251 
1252  cpmac_mii->reset(cpmac_mii);
1253 
1254  for (i = 0; i < 300; i++) {
1255  mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
1256  if (mask)
1257  break;
1258  else
1259  msleep(10);
1260  }
1261 
1262  mask &= 0x7fffffff;
1263  if (mask & (mask - 1)) {
1264  external_switch = 1;
1265  mask = 0;
1266  }
1267 
1268  cpmac_mii->phy_mask = ~(mask | 0x80000000);
1269  snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
1270 
1271  res = mdiobus_register(cpmac_mii);
1272  if (res)
1273  goto fail_mii;
1274 
1275  res = platform_driver_register(&cpmac_driver);
1276  if (res)
1277  goto fail_cpmac;
1278 
1279  return 0;
1280 
1281 fail_cpmac:
1282  mdiobus_unregister(cpmac_mii);
1283 
1284 fail_mii:
1285  iounmap(cpmac_mii->priv);
1286 
1287 fail_alloc:
1288  mdiobus_free(cpmac_mii);
1289 
1290  return res;
1291 }
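The mask & (mask - 1) test in cpmac_init() is the usual more-than-one-bit-set idiom: it is non-zero exactly when several PHY addresses reported alive, in which case the hardware is assumed to be an external switch. A tiny demonstration with made-up masks:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t masks[] = { 0x00000000, 0x00000010, 0x00000030 };
            unsigned int i;

            for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
                    uint32_t mask = masks[i] & 0x7fffffff;
                    /* Non-zero iff more than one PHY is alive. */
                    printf("mask=0x%08x -> external switch: %s\n",
                           (unsigned)masks[i],
                           (mask & (mask - 1)) ? "yes" : "no");
            }
            return 0;
    }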
1292 
1293 void __devexit cpmac_exit(void)
1294 {
1295  platform_driver_unregister(&cpmac_driver);
1296  mdiobus_unregister(cpmac_mii);
1297  iounmap(cpmac_mii->priv);
1298  mdiobus_free(cpmac_mii);
1299 }
1300
1301 module_init(cpmac_init);
1302 module_exit(cpmac_exit);