Linux Kernel 3.7.1
mac-scc.c
/*
 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <[email protected]>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>

#include <asm/irq.h>
#include <asm/uaccess.h>

#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/mpc8xx.h>
#include <asm/cpm1.h>
#endif

#include "fs_enet.h"

/*************************************************/
#if defined(CONFIG_CPM1)
/* for a 8xx __raw_xxx's are sufficient */
#define __fs_out32(addr, x) __raw_writel(x, addr)
#define __fs_out16(addr, x) __raw_writew(x, addr)
#define __fs_out8(addr, x) __raw_writeb(x, addr)
#define __fs_in32(addr) __raw_readl(addr)
#define __fs_in16(addr) __raw_readw(addr)
#define __fs_in8(addr) __raw_readb(addr)
#else
/* for others play it safe */
#define __fs_out32(addr, x) out_be32(addr, x)
#define __fs_out16(addr, x) out_be16(addr, x)
#define __fs_in32(addr) in_be32(addr)
#define __fs_in16(addr) in_be16(addr)
#define __fs_out8(addr, x) out_8(addr, x)
#define __fs_in8(addr) in_8(addr)
#endif

/* write, read, set bits, clear bits */
#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
#define R32(_p, _m) __fs_in32(&(_p)->_m)
#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))

#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
#define R16(_p, _m) __fs_in16(&(_p)->_m)
#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))

#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
#define R8(_p, _m) __fs_in8(&(_p)->_m)
#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
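
/*
 * The helpers above do a masked read-modify-write on memory-mapped SCC
 * registers.  As an illustration (not additional driver code),
 *
 *      S16(sccp, scc_psmr, SCC_PSMR_PRO);
 *
 * expands to roughly
 *
 *      __fs_out16(&sccp->scc_psmr, __fs_in16(&sccp->scc_psmr) | SCC_PSMR_PRO);
 *
 * i.e. read PSMR, set the promiscuous bit and write the result back;
 * the C16() form clears the bit instead.
 */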

#define SCC_MAX_MULTICAST_ADDRS 64

/*
 * Delay to wait for SCC reset command to complete (in us)
 */
#define SCC_RESET_DELAY 50

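/*
 * Issue a CPM command for this SCC (e.g. INIT RX AND TX PARAMETERS,
 * RESTART TX, SET GROUP ADDRESS), using the command word supplied in the
 * platform data.
 */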
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
        const struct fs_platform_info *fpi = fep->fpi;

        return cpm_command(fpi->cp_command, op);
}

static int do_pd_setup(struct fs_enet_private *fep)
{
        struct platform_device *ofdev = to_platform_device(fep->dev);

        fep->interrupt = of_irq_to_resource(ofdev->dev.of_node, 0, NULL);
        if (fep->interrupt == NO_IRQ)
                return -EINVAL;

        fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
        if (!fep->scc.sccp)
                return -EINVAL;

        fep->scc.ep = of_iomap(ofdev->dev.of_node, 1);
        if (!fep->scc.ep) {
                iounmap(fep->scc.sccp);
                return -EINVAL;
        }

        return 0;
}

#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
#define SCC_RX_EVENT (SCCE_ENET_RXF)
#define SCC_TX_EVENT (SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
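
/*
 * Event bit groups handed to the generic fs_enet layer by setup_data():
 * RXF/RXB feed the NAPI receive path, TXB signals transmit completion,
 * and TXE/BSY are treated as error events.
 */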

static int setup_data(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        do_pd_setup(fep);

        fep->scc.hthi = 0;
        fep->scc.htlo = 0;

        fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
        fep->ev_rx = SCC_RX_EVENT;
        fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
        fep->ev_err = SCC_ERR_EVENT_MSK;

        return 0;
}

static int allocate_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        const struct fs_platform_info *fpi = fep->fpi;

        fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
                                         sizeof(cbd_t), 8);
        if (IS_ERR_VALUE(fep->ring_mem_addr))
                return -ENOMEM;

        fep->ring_base = (void __iomem __force*)
                         cpm_dpram_addr(fep->ring_mem_addr);

        return 0;
}

static void free_bd(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (fep->ring_base)
                cpm_dpfree(fep->ring_mem_addr);
}

static void cleanup_data(struct net_device *dev)
{
        /* nothing */
}

static void set_promiscuous_mode(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        S16(sccp, scc_psmr, SCC_PSMR_PRO);
}

static void set_multicast_start(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_enet_t __iomem *ep = fep->scc.ep;

        W16(ep, sen_gaddr1, 0);
        W16(ep, sen_gaddr2, 0);
        W16(ep, sen_gaddr3, 0);
        W16(ep, sen_gaddr4, 0);
}

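/*
 * Add one multicast address to the hardware filter: the address is loaded
 * into the temporary address registers and the SET GROUP ADDRESS command
 * asks the CPM to fold it into the sen_gaddr1-4 hash table.
 */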
static void set_multicast_one(struct net_device *dev, const u8 * mac)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_enet_t __iomem *ep = fep->scc.ep;
        u16 taddrh, taddrm, taddrl;

        taddrh = ((u16) mac[5] << 8) | mac[4];
        taddrm = ((u16) mac[3] << 8) | mac[2];
        taddrl = ((u16) mac[1] << 8) | mac[0];

        W16(ep, sen_taddrh, taddrh);
        W16(ep, sen_taddrm, taddrm);
        W16(ep, sen_taddrl, taddrl);
        scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}

static void set_multicast_finish(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        scc_enet_t __iomem *ep = fep->scc.ep;

        /* clear promiscuous always */
        C16(sccp, scc_psmr, SCC_PSMR_PRO);

        /* if all multi or too many multicasts; just enable all */
        if ((dev->flags & IFF_ALLMULTI) != 0 ||
            netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {

                W16(ep, sen_gaddr1, 0xffff);
                W16(ep, sen_gaddr2, 0xffff);
                W16(ep, sen_gaddr3, 0xffff);
                W16(ep, sen_gaddr4, 0xffff);
        }
}

static void set_multicast_list(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        if ((dev->flags & IFF_PROMISC) == 0) {
                set_multicast_start(dev);
                netdev_for_each_mc_addr(ha, dev)
                        set_multicast_one(dev, ha->addr);
                set_multicast_finish(dev);
        } else
                set_promiscuous_mode(dev);
}

/*
 * This function is called to start or restart the SCC during a link
 * change. This only happens when switching between half and full
 * duplex.
 */
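/*
 * The sequence below: disable the receiver and transmitter, zero the SCC
 * Ethernet parameter RAM, program the buffer descriptor bases and DMA
 * function codes, set the frame/buffer size limits and CRC parameters,
 * clear the address filters, load the station address, rebuild the BD
 * rings, issue INIT RX AND TX PARAMETERS, then set up SCCE/SCCM, GSMR,
 * DSR and PSMR before re-enabling the controller.
 */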
static void restart(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        scc_enet_t __iomem *ep = fep->scc.ep;
        const struct fs_platform_info *fpi = fep->fpi;
        u16 paddrh, paddrm, paddrl;
        const unsigned char *mac;
        int i;

        C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        /* clear everything (slow & steady does it) */
        for (i = 0; i < sizeof(*ep); i++)
                __fs_out8((u8 __iomem *)ep + i, 0);

        /* point to bds */
        W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
        W16(ep, sen_genscc.scc_tbase,
            fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

        /* Initialize function code registers for big-endian.
         */
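        /*
         * SCC_EB selects big-endian byte ordering for the SDMA transfers;
         * SCC_GBL additionally marks them as global (snoopable), which is
         * only of use when the data cache is coherent with DMA.
         */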
#ifndef CONFIG_NOT_COHERENT_CACHE
        W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL);
        W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL);
#else
        W8(ep, sen_genscc.scc_rfcr, SCC_EB);
        W8(ep, sen_genscc.scc_tfcr, SCC_EB);
#endif

        /* Set maximum bytes per receive buffer.
         * This appears to be an Ethernet frame size, not the buffer
         * fragment size. It must be a multiple of four.
         */
        W16(ep, sen_genscc.scc_mrblr, 0x5f0);

        /* Set CRC preset and mask.
         */
        W32(ep, sen_cpres, 0xffffffff);
        W32(ep, sen_cmask, 0xdebb20e3);

        W32(ep, sen_crcec, 0);  /* CRC Error counter */
        W32(ep, sen_alec, 0);   /* alignment error counter */
        W32(ep, sen_disfc, 0);  /* discard frame counter */

        W16(ep, sen_pads, 0x8888);      /* Tx short frame pad character */
        W16(ep, sen_retlim, 15);        /* Retry limit threshold */

        W16(ep, sen_maxflr, 0x5ee);     /* maximum frame length register */

        W16(ep, sen_minflr, PKT_MINBUF_SIZE);   /* minimum frame length register */

        W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
        W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */

        /* Clear hash tables.
         */
        W16(ep, sen_gaddr1, 0);
        W16(ep, sen_gaddr2, 0);
        W16(ep, sen_gaddr3, 0);
        W16(ep, sen_gaddr4, 0);
        W16(ep, sen_iaddr1, 0);
        W16(ep, sen_iaddr2, 0);
        W16(ep, sen_iaddr3, 0);
        W16(ep, sen_iaddr4, 0);

        /* set address
         */
        mac = dev->dev_addr;
        paddrh = ((u16) mac[5] << 8) | mac[4];
        paddrm = ((u16) mac[3] << 8) | mac[2];
        paddrl = ((u16) mac[1] << 8) | mac[0];

        W16(ep, sen_paddrh, paddrh);
        W16(ep, sen_paddrm, paddrm);
        W16(ep, sen_paddrl, paddrl);

        W16(ep, sen_pper, 0);
        W16(ep, sen_taddrl, 0);
        W16(ep, sen_taddrm, 0);
        W16(ep, sen_taddrh, 0);

        fs_init_bds(dev);

        scc_cr_cmd(fep, CPM_CR_INIT_TRX);

        W16(sccp, scc_scce, 0xffff);

        /* Enable interrupts we wish to service.
         */
        W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

        /* Set GSMR_H to enable all normal operating modes.
         * Set GSMR_L to enable Ethernet to MC68160.
         */
        W32(sccp, scc_gsmrh, 0);
        W32(sccp, scc_gsmrl,
            SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
            SCC_GSMRL_MODE_ENET);

        /* Set sync/delimiters.
         */
        W16(sccp, scc_dsr, 0xd555);

        /* Set processing mode. Use Ethernet CRC, catch broadcast, and
         * start frame search 22 bit times after RENA.
         */
        W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

        /* Set full duplex mode if needed */
        if (fep->phydev->duplex)
                S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

        S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}

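/*
 * Quiesce the controller: poll briefly (bounded by SCC_RESET_DELAY) for a
 * graceful transmit stop and warn on timeout, then mask all SCC interrupts,
 * clear ENR/ENT in GSMR_L and tear down the buffer descriptor rings.
 */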
static void stop(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;
        int i;

        for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
                udelay(1);

        if (i == SCC_RESET_DELAY)
                dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");

        W16(sccp, scc_sccm, 0);
        C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

        fs_cleanup_bds(dev);
}

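/*
 * NAPI hooks used by the generic fs_enet poll loop: acknowledge the RX
 * events in SCCE, and unmask or mask them in SCCM respectively.
 */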
static void napi_clear_rx_event(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_enable_rx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void napi_disable_rx(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}

static void rx_bd_done(struct net_device *dev)
{
        /* nothing */
}

static void tx_kickstart(struct net_device *dev)
{
        /* nothing */
}

static u32 get_int_events(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        return (u32) R16(sccp, scc_scce);
}

static void clear_int_events(struct net_device *dev, u32 int_events)
{
        struct fs_enet_private *fep = netdev_priv(dev);
        scc_t __iomem *sccp = fep->scc.sccp;

        W16(sccp, scc_scce, int_events & 0xffff);
}

static void ev_error(struct net_device *dev, u32 int_events)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}

static int get_regs(struct net_device *dev, void *p, int *sizep)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t __iomem *))
                return -EINVAL;

        memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
        p = (char *)p + sizeof(scc_t);

        memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *));

        return 0;
}

static int get_regs_len(struct net_device *dev)
{
        return sizeof(scc_t) + sizeof(scc_enet_t __iomem *);
}

static void tx_restart(struct net_device *dev)
{
        struct fs_enet_private *fep = netdev_priv(dev);

        scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}


/*************************************************************************/

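/*
 * Ops table exported to the generic fs_enet core, which uses it to drive
 * SCC-based MACs (the FCC and FEC variants provide their own tables).
 */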
const struct fs_ops fs_scc_ops = {
        .setup_data = setup_data,
        .cleanup_data = cleanup_data,
        .set_multicast_list = set_multicast_list,
        .restart = restart,
        .stop = stop,
        .napi_clear_rx_event = napi_clear_rx_event,
        .napi_enable_rx = napi_enable_rx,
        .napi_disable_rx = napi_disable_rx,
        .rx_bd_done = rx_bd_done,
        .tx_kickstart = tx_kickstart,
        .get_int_events = get_int_events,
        .clear_int_events = clear_int_events,
        .ev_error = ev_error,
        .get_regs = get_regs,
        .get_regs_len = get_regs_len,
        .tx_restart = tx_restart,
        .allocate_bd = allocate_bd,
        .free_bd = free_bd,
};