Linux Kernel 3.7.1
mal.c
/*
 * drivers/net/ethernet/ibm/emac/mal.c
 *
 * Memory Access Layer (MAL) support
 *
 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
 *                <benh@kernel.crashing.org>
 *
 * Based on the arch/ppc version of the driver:
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Benjamin Herrenschmidt <benh@kernel.crashing.org>,
 *      David Gibson <hermes@gibson.dropbear.id.au>,
 *
 *      Armin Kuster <akuster@mvista.com>
 *      Copyright 2002 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "core.h"
#include <asm/dcr-regs.h>

static int mal_count;

int mal_register_commac(struct mal_instance *mal,
                        struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "reg(%08x, %08x)" NL,
                commac->tx_chan_mask, commac->rx_chan_mask);

        /* Don't let multiple commacs claim the same channel(s) */
        if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
            (mal->rx_chan_mask & commac->rx_chan_mask)) {
                spin_unlock_irqrestore(&mal->lock, flags);
                printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
                       mal->index);
                return -EBUSY;
        }

        if (list_empty(&mal->list))
                napi_enable(&mal->napi);
        mal->tx_chan_mask |= commac->tx_chan_mask;
        mal->rx_chan_mask |= commac->rx_chan_mask;
        list_add(&commac->list, &mal->list);

        spin_unlock_irqrestore(&mal->lock, flags);

        return 0;
}
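
/*
 * Example (editorial sketch, not part of the original file): roughly how
 * an EMAC channel driver attaches itself to a MAL instance, similar to
 * what the EMAC core does in core.c.  The ops table and channel numbers
 * here are placeholders; mal_register_commac() returns -EBUSY if another
 * commac already owns one of the requested channels.
 */
static struct mal_commac_ops example_commac_ops; /* poll_tx/poll_rx/peek_rx/rxde */

static int example_attach_commac(struct mal_instance *mal,
                                 struct mal_commac *commac,
                                 int tx_chan, int rx_chan, void *dev)
{
        commac->ops = &example_commac_ops;
        commac->dev = dev;
        commac->tx_chan_mask = MAL_CHAN_MASK(tx_chan);
        commac->rx_chan_mask = MAL_CHAN_MASK(rx_chan);
        return mal_register_commac(mal, commac);
}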
65 
67  struct mal_commac *commac)
68 {
69  unsigned long flags;
70 
71  spin_lock_irqsave(&mal->lock, flags);
72 
73  MAL_DBG(mal, "unreg(%08x, %08x)" NL,
74  commac->tx_chan_mask, commac->rx_chan_mask);
75 
76  mal->tx_chan_mask &= ~commac->tx_chan_mask;
77  mal->rx_chan_mask &= ~commac->rx_chan_mask;
78  list_del_init(&commac->list);
79  if (list_empty(&mal->list))
80  napi_disable(&mal->napi);
81 
82  spin_unlock_irqrestore(&mal->lock, flags);
83 }
84 
int mal_set_rcbs(struct mal_instance *mal, int channel, unsigned long size)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans ||
               size > MAL_MAX_RX_SIZE);

        MAL_DBG(mal, "set_rbcs(%d, %lu)" NL, channel, size);

        if (size & 0xf) {
                printk(KERN_WARNING
                       "mal%d: incorrect RX size %lu for the channel %d\n",
                       mal->index, size, channel);
                return -EINVAL;
        }

        set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
        return 0;
}
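
/*
 * Example (editorial sketch, not part of the original file): MAL_RCBS
 * stores the RX channel buffer size in 16-byte units, which is why
 * mal_set_rcbs() rejects sizes that are not multiples of 16 and writes
 * size >> 4.  For instance, a 1536-byte buffer is programmed as
 * 1536 >> 4 == 96, while 1500 fails the (size & 0xf) check.
 */
static int example_rcbs_value(unsigned long size, u32 *out)
{
        if ((size & 0xf) || size > MAL_MAX_RX_SIZE)
                return -EINVAL;         /* not a 16-byte multiple, or too big */
        *out = size >> 4;               /* value actually written to MAL_RCBS */
        return 0;
}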

int mal_tx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_tx_chans);

        return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct mal_instance *mal, int channel)
{
        BUG_ON(channel < 0 || channel >= mal->num_rx_chans);

        return mal->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}
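
/*
 * Example (editorial sketch, not part of the original file): the two
 * offset helpers above describe one contiguous descriptor array with all
 * TX rings first, then all RX rings.  For a hypothetical MAL with two TX
 * and two RX channels:
 *
 *   [ TX0: NUM_TX_BUFF ][ TX1: NUM_TX_BUFF ][ RX0: NUM_RX_BUFF ][ RX1: NUM_RX_BUFF ]
 *
 * so mal_tx_bd_offset(mal, 1) == NUM_TX_BUFF and
 * mal_rx_bd_offset(mal, 0) == 2 * NUM_TX_BUFF.  The matching total size,
 * mirroring the bd_size computation in mal_probe() below:
 */
static size_t example_bd_array_bytes(int num_tx_chans, int num_rx_chans)
{
        return sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * num_tx_chans + NUM_RX_BUFF * num_rx_chans);
}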

void mal_enable_tx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_tx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_TXCASR,
                     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_tx_channel(struct mal_instance *mal, int channel)
{
        set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_tx(%d)" NL, channel);
}

void mal_enable_rx_channel(struct mal_instance *mal, int channel)
{
        unsigned long flags;

        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but the bitmask for MAL_RXCASR needs the value
         * divided by 8.
         */
        if (!(channel % 8))
                channel >>= 3;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "enable_rx(%d)" NL, channel);

        set_mal_dcrn(mal, MAL_RXCASR,
                     get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_disable_rx_channel(struct mal_instance *mal, int channel)
{
        /*
         * On some 4xx PPCs (e.g. 460EX/GT), the RX channel number is a
         * multiple of 8, but the bitmask for MAL_RXCARR needs the value
         * divided by 8.
         */
        if (!(channel % 8))
                channel >>= 3;

        set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));

        MAL_DBG(mal, "disable_rx(%d)" NL, channel);
}
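
/*
 * Example (editorial sketch, not part of the original file): the
 * channel-number fixup above in one place.  460EX/GT-style parts hand
 * these helpers RX channel numbers that are multiples of 8 (0, 8, 16,
 * ...), while MAL_RXCASR/MAL_RXCARR want plain bit indices (0, 1, 2,
 * ...); any other channel number is already a usable bit index.
 */
static inline int example_rx_chan_to_bit(int channel)
{
        return (channel % 8) ? channel : channel >> 3;  /* 8 -> 1, 16 -> 2 */
}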

void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_add(%p)" NL, commac);

        /* starts disabled */
        set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        list_add_tail(&commac->poll_list, &mal->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

void mal_poll_del(struct mal_instance *mal, struct mal_commac *commac)
{
        unsigned long flags;

        spin_lock_irqsave(&mal->lock, flags);

        MAL_DBG(mal, "poll_del(%p)" NL, commac);

        list_del(&commac->poll_list);

        spin_unlock_irqrestore(&mal->lock, flags);
}

/* synchronized by mal_poll() */
static inline void mal_enable_eob_irq(struct mal_instance *mal)
{
        MAL_DBG2(mal, "enable_irq" NL);

        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
}

/* synchronized by NAPI state */
static inline void mal_disable_eob_irq(struct mal_instance *mal)
{
        // XXX might want to cache MAL_CFG as the DCR read can be slooooow
        set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);

        MAL_DBG2(mal, "disable_irq" NL);
}
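
/*
 * Example (editorial sketch, not part of the original file): the shadow
 * register caching the XXX comments above hint at.  'cfg_shadow' is a
 * hypothetical variable that would mirror the last value written to
 * MAL_CFG, turning each EOPIE toggle into a single DCR write instead of
 * a slow read-modify-write.
 */
static inline void example_set_eopie(struct mal_instance *mal,
                                     u32 *cfg_shadow, bool enable)
{
        u32 cfg = enable ? (*cfg_shadow | MAL_CFG_EOPIE)
                         : (*cfg_shadow & ~MAL_CFG_EOPIE);

        if (cfg != *cfg_shadow) {
                *cfg_shadow = cfg;
                set_mal_dcrn(mal, MAL_CFG, cfg);        /* one DCR write */
        }
}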

static irqreturn_t mal_serr(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        /* Clear the error status register */
        set_mal_dcrn(mal, MAL_ESR, esr);

        MAL_DBG(mal, "SERR %08x" NL, esr);

        if (esr & MAL_ESR_EVB) {
                if (esr & MAL_ESR_DE) {
                        /* We ignore the descriptor error; a TXDE or RXDE
                         * interrupt will be generated anyway.
                         */
                        return IRQ_HANDLED;
                }

                if (esr & MAL_ESR_PEIN) {
                        /* PLB error, it's probably buggy hardware or an
                         * incorrect physical address in a BD (i.e. a bug)
                         */
                        if (net_ratelimit())
                                printk(KERN_ERR
                                       "mal%d: system error, "
                                       "PLB (ESR = 0x%08x)\n",
                                       mal->index, esr);
                        return IRQ_HANDLED;
                }

                /* OPB error, it's probably buggy hardware or an incorrect
                 * EBC setup
                 */
                if (net_ratelimit())
                        printk(KERN_ERR
                               "mal%d: system error, OPB (ESR = 0x%08x)\n",
                               mal->index, esr);
        }
        return IRQ_HANDLED;
}

static inline void mal_schedule_poll(struct mal_instance *mal)
{
        if (likely(napi_schedule_prep(&mal->napi))) {
                MAL_DBG2(mal, "schedule_poll" NL);
                mal_disable_eob_irq(mal);
                __napi_schedule(&mal->napi);
        } else
                MAL_DBG2(mal, "already in poll" NL);
}
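
/*
 * Editorial note (not part of the original file): napi_schedule_prep()
 * atomically claims NAPI_STATE_SCHED, so when the TX and RX EOB
 * interrupts race only the first one masks the EOB source and queues the
 * poller.  The matching completion half lives at the end of mal_poll()
 * below; a minimal sketch of the generic pattern, with
 * example_process_rx() and example_unmask() as hypothetical helpers:
 */
static int example_process_rx(struct napi_struct *napi, int budget);
static void example_unmask(struct napi_struct *napi);

static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int done = example_process_rx(napi, budget);    /* hypothetical */

        if (done < budget) {
                napi_complete(napi);    /* leave polled mode ... */
                example_unmask(napi);   /* ... and re-arm the interrupt */
        }
        return done;    /* done == budget keeps the poller scheduled */
}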

static irqreturn_t mal_txeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);

        MAL_DBG2(mal, "txeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_TXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxeob(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);

        MAL_DBG2(mal, "rxeob %08x" NL, r);

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXEOBISR, r);

#ifdef CONFIG_PPC_DCR_NATIVE
        if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
                mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
                       (mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
#endif

        return IRQ_HANDLED;
}

static irqreturn_t mal_txde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;

        u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
        set_mal_dcrn(mal, MAL_TXDEIR, deir);

        MAL_DBG(mal, "txde %08x" NL, deir);

        if (net_ratelimit())
                printk(KERN_ERR
                       "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
                       mal->index, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_rxde(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        struct list_head *l;

        u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);

        MAL_DBG(mal, "rxde %08x" NL, deir);

        list_for_each(l, &mal->list) {
                struct mal_commac *mc = list_entry(l, struct mal_commac, list);
                if (deir & mc->rx_chan_mask) {
                        set_bit(MAL_COMMAC_RX_STOPPED, &mc->flags);
                        mc->ops->rxde(mc->dev);
                }
        }

        mal_schedule_poll(mal);
        set_mal_dcrn(mal, MAL_RXDEIR, deir);

        return IRQ_HANDLED;
}

static irqreturn_t mal_int(int irq, void *dev_instance)
{
        struct mal_instance *mal = dev_instance;
        u32 esr = get_mal_dcrn(mal, MAL_ESR);

        if (esr & MAL_ESR_EVB) {
                /* descriptor error */
                if (esr & MAL_ESR_DE) {
                        if (esr & MAL_ESR_CIDT)
                                return mal_rxde(irq, dev_instance);
                        else
                                return mal_txde(irq, dev_instance);
                } else { /* SERR */
                        return mal_serr(irq, dev_instance);
                }
        }
        return IRQ_HANDLED;
}

void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac)
{
        /* Spinlock-type semantics: only one caller may disable poll at a time */
        while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags))
                msleep(1);

        /* Synchronize with the MAL NAPI poller */
        napi_synchronize(&mal->napi);
}

void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
{
        smp_wmb();
        clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags);

        /* It feels better to trigger a poll here to catch up with events
         * that may have happened on this channel while it was disabled.
         * The poll will most probably be delayed until the next interrupt,
         * but that's mostly a non-issue in the context where this is called.
         */
        napi_schedule(&mal->napi);
}
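
/*
 * Example (editorial sketch, not part of the original file): how a
 * commac driver brackets a ring reconfiguration with the two helpers
 * above, similar to what the EMAC core does around a full reset.
 * mal_poll_disable() sleeps until it owns MAL_COMMAC_POLL_DISABLED and
 * any in-flight mal_poll() has finished; until mal_poll_enable(), the
 * poller skips this commac.  example_reinit_rx_ring() is hypothetical.
 */
static void example_reinit_rx_ring(void);

static void example_quiesced_reconfig(struct mal_instance *mal,
                                      struct mal_commac *commac, int rx_chan)
{
        mal_poll_disable(mal, commac);
        mal_disable_rx_channel(mal, rx_chan);

        example_reinit_rx_ring();       /* safe: not polled, channel stopped */

        mal_enable_rx_channel(mal, rx_chan);
        mal_poll_enable(mal, commac);
}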

static int mal_poll(struct napi_struct *napi, int budget)
{
        struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
        struct list_head *l;
        int received = 0;
        unsigned long flags;

        MAL_DBG2(mal, "poll(%d)" NL, budget);
 again:
        /* Process TX skbs */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                mc->ops->poll_tx(mc->dev);
        }

        /* Process RX skbs.
         *
         * We _might_ need something smarter here to enforce polling
         * fairness.
         */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                n = mc->ops->poll_rx(mc->dev, budget);
                if (n) {
                        received += n;
                        budget -= n;
                        if (budget <= 0)
                                goto more_work; // XXX What if this is the last one ?
                }
        }

        /* We need to disable IRQs to protect from the RXDE IRQ here */
        spin_lock_irqsave(&mal->lock, flags);
        __napi_complete(napi);
        mal_enable_eob_irq(mal);
        spin_unlock_irqrestore(&mal->lock, flags);

        /* Check for "rotting" packet(s) */
        list_for_each(l, &mal->poll_list) {
                struct mal_commac *mc =
                        list_entry(l, struct mal_commac, poll_list);
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
                if (unlikely(mc->ops->peek_rx(mc->dev) ||
                             test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
                        MAL_DBG2(mal, "rotting packet" NL);
                        if (napi_reschedule(napi))
                                mal_disable_eob_irq(mal);
                        else
                                MAL_DBG2(mal, "already in poll list" NL);

                        if (budget > 0)
                                goto again;
                        else
                                goto more_work;
                }
                mc->ops->poll_tx(mc->dev);
        }

 more_work:
        MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
        return received;
}

static void mal_reset(struct mal_instance *mal)
{
        int n = 10;

        MAL_DBG(mal, "reset" NL);

        set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);

        /* Wait for reset to complete (1 system clock) */
        while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
                --n;

        if (unlikely(!n))
                printk(KERN_ERR "mal%d: reset timeout\n", mal->index);
}

int mal_get_regs_len(struct mal_instance *mal)
{
        return sizeof(struct emac_ethtool_regs_subhdr) +
                sizeof(struct mal_regs);
}

void *mal_dump_regs(struct mal_instance *mal, void *buf)
{
        struct emac_ethtool_regs_subhdr *hdr = buf;
        struct mal_regs *regs = (struct mal_regs *)(hdr + 1);
        int i;

        hdr->version = mal->version;
        hdr->index = mal->index;

        regs->tx_count = mal->num_tx_chans;
        regs->rx_count = mal->num_rx_chans;

        regs->cfg = get_mal_dcrn(mal, MAL_CFG);
        regs->esr = get_mal_dcrn(mal, MAL_ESR);
        regs->ier = get_mal_dcrn(mal, MAL_IER);
        regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
        regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
        regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
        regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
        regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
        regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
        regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
        regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);

        for (i = 0; i < regs->tx_count; ++i)
                regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));

        for (i = 0; i < regs->rx_count; ++i) {
                regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
                regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
        }
        return regs + 1;
}
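
/*
 * Example (editorial sketch, not part of the original file): the dump
 * writes a subheader followed by a struct mal_regs and returns a cursor
 * just past them, so a caller (the EMAC ethtool code in the real driver)
 * can pack several register blocks back-to-back into one buffer sized
 * with mal_get_regs_len().
 */
static void *example_dump_chained(struct mal_instance *mal, void *cursor)
{
        /* caller guarantees at least mal_get_regs_len(mal) bytes remain */
        return mal_dump_regs(mal, cursor); /* advanced by exactly that much */
}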

static int __devinit mal_probe(struct platform_device *ofdev)
{
        struct mal_instance *mal;
        int err = 0, i, bd_size;
        int index = mal_count++;
        unsigned int dcr_base;
        const u32 *prop;
        u32 cfg;
        unsigned long irqflags;
        irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;

        mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
        if (!mal) {
                printk(KERN_ERR
                       "mal%d: out of memory allocating MAL structure!\n",
                       index);
                return -ENOMEM;
        }
        mal->index = index;
        mal->ofdev = ofdev;
        mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;

        MAL_DBG(mal, "probe" NL);

        prop = of_get_property(ofdev->dev.of_node, "num-tx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-tx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_tx_chans = prop[0];

        prop = of_get_property(ofdev->dev.of_node, "num-rx-chans", NULL);
        if (prop == NULL) {
                printk(KERN_ERR
                       "mal%d: can't find MAL num-rx-chans property!\n",
                       index);
                err = -ENODEV;
                goto fail;
        }
        mal->num_rx_chans = prop[0];

        dcr_base = dcr_resource_start(ofdev->dev.of_node, 0);
        if (dcr_base == 0) {
                printk(KERN_ERR
                       "mal%d: can't find DCR resource!\n", index);
                err = -ENODEV;
                goto fail;
        }
        mal->dcr_host = dcr_map(ofdev->dev.of_node, dcr_base, 0x100);
        if (!DCR_MAP_OK(mal->dcr_host)) {
                printk(KERN_ERR
                       "mal%d: failed to map DCRs !\n", index);
                err = -ENODEV;
                goto fail;
        }

        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-405ez")) {
#if defined(CONFIG_IBM_EMAC_MAL_CLR_ICINTSTAT) && \
    defined(CONFIG_IBM_EMAC_MAL_COMMON_ERR)
                mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
                                  MAL_FTR_COMMON_ERR_INT);
#else
                printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
                       ofdev->dev.of_node->full_name);
                err = -ENODEV;
                goto fail;
#endif
        }

        mal->txeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
        mal->rxeob_irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
        mal->serr_irq = irq_of_parse_and_map(ofdev->dev.of_node, 2);

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                mal->txde_irq = mal->rxde_irq = mal->serr_irq;
        } else {
                mal->txde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 3);
                mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
        }

        if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
            mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
            mal->rxde_irq == NO_IRQ) {
                printk(KERN_ERR
                       "mal%d: failed to map interrupts !\n", index);
                err = -ENODEV;
                goto fail_unmap;
        }

        INIT_LIST_HEAD(&mal->poll_list);
        INIT_LIST_HEAD(&mal->list);
        spin_lock_init(&mal->lock);

        init_dummy_netdev(&mal->dummy_dev);

        netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
                       CONFIG_IBM_EMAC_POLL_WEIGHT);

        /* Load power-on reset defaults */
        mal_reset(mal);

        /* Set the MAL configuration register */
        cfg = (mal->version == 2) ? MAL2_CFG_DEFAULT : MAL1_CFG_DEFAULT;
        cfg |= MAL_CFG_PLBB | MAL_CFG_OPBBL | MAL_CFG_LEA;

        /* The current Axon is not happy with priority being non-0; it can
         * deadlock. Fix it up here.
         */
        if (of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal-axon"))
                cfg &= ~(MAL2_CFG_RPP_10 | MAL2_CFG_WPP_10);

        /* Apply configuration */
        set_mal_dcrn(mal, MAL_CFG, cfg);

        /* Allocate space for BD rings */
        BUG_ON(mal->num_tx_chans <= 0 || mal->num_tx_chans > 32);
        BUG_ON(mal->num_rx_chans <= 0 || mal->num_rx_chans > 32);

        bd_size = sizeof(struct mal_descriptor) *
                (NUM_TX_BUFF * mal->num_tx_chans +
                 NUM_RX_BUFF * mal->num_rx_chans);
        mal->bd_virt =
                dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
                                   GFP_KERNEL);
        if (mal->bd_virt == NULL) {
                printk(KERN_ERR
                       "mal%d: out of memory allocating RX/TX descriptors!\n",
                       index);
                err = -ENOMEM;
                goto fail_unmap;
        }
        memset(mal->bd_virt, 0, bd_size);

        for (i = 0; i < mal->num_tx_chans; ++i)
                set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_tx_bd_offset(mal, i));

        for (i = 0; i < mal->num_rx_chans; ++i)
                set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
                             sizeof(struct mal_descriptor) *
                             mal_rx_bd_offset(mal, i));

        if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
                irqflags = IRQF_SHARED;
                hdlr_serr = hdlr_txde = hdlr_rxde = mal_int;
        } else {
                irqflags = 0;
                hdlr_serr = mal_serr;
                hdlr_txde = mal_txde;
                hdlr_rxde = mal_rxde;
        }

        err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
        if (err)
                goto fail2;
        err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
        if (err)
                goto fail3;
        err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
        if (err)
                goto fail4;
        err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
        if (err)
                goto fail5;
        err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
        if (err)
                goto fail6;

        /* Enable all MAL SERR interrupt sources */
        if (mal->version == 2)
                set_mal_dcrn(mal, MAL_IER, MAL2_IER_EVENTS);
        else
                set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);

        /* Enable EOB interrupt */
        mal_enable_eob_irq(mal);

        printk(KERN_INFO
               "MAL v%d %s, %d TX channels, %d RX channels\n",
               mal->version, ofdev->dev.of_node->full_name,
               mal->num_tx_chans, mal->num_rx_chans);

        /* Advertise this instance to the rest of the world */
        wmb();
        dev_set_drvdata(&ofdev->dev, mal);

        mal_dbg_register(mal);

        return 0;

 fail6:
        free_irq(mal->rxde_irq, mal);
 fail5:
        free_irq(mal->txeob_irq, mal);
 fail4:
        free_irq(mal->txde_irq, mal);
 fail3:
        free_irq(mal->serr_irq, mal);
 fail2:
        dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
 fail_unmap:
        dcr_unmap(mal->dcr_host, 0x100);
 fail:
        kfree(mal);

        return err;
}

static int __devexit mal_remove(struct platform_device *ofdev)
{
        struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);

        MAL_DBG(mal, "remove" NL);

        /* Synchronize with scheduled polling */
        napi_disable(&mal->napi);

        if (!list_empty(&mal->list)) {
                /* This is *very* bad */
                printk(KERN_EMERG
                       "mal%d: commac list is not empty on remove!\n",
                       mal->index);
                WARN_ON(1);
        }

        dev_set_drvdata(&ofdev->dev, NULL);

        free_irq(mal->serr_irq, mal);
        free_irq(mal->txde_irq, mal);
        free_irq(mal->txeob_irq, mal);
        free_irq(mal->rxde_irq, mal);
        free_irq(mal->rxeob_irq, mal);

        mal_reset(mal);

        mal_dbg_unregister(mal);

        dma_free_coherent(&ofdev->dev,
                          sizeof(struct mal_descriptor) *
                          (NUM_TX_BUFF * mal->num_tx_chans +
                           NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
                          mal->bd_dma);
        kfree(mal);

        return 0;
}

static struct of_device_id mal_platform_match[] =
{
        {
                .compatible = "ibm,mcmal",
        },
        {
                .compatible = "ibm,mcmal2",
        },
        /* Backward compat */
        {
                .type = "mcmal-dma",
                .compatible = "ibm,mcmal",
        },
        {
                .type = "mcmal-dma",
                .compatible = "ibm,mcmal2",
        },
        {},
};

static struct platform_driver mal_of_driver = {
        .driver = {
                .name = "mcmal",
                .owner = THIS_MODULE,
                .of_match_table = mal_platform_match,
        },
        .probe = mal_probe,
        .remove = mal_remove,
};

int __init mal_init(void)
{
        return platform_driver_register(&mal_of_driver);
}

void mal_exit(void)
{
        platform_driver_unregister(&mal_of_driver);
}