Linux Kernel 3.7.1
timb_dma.c
/*
 * timb_dma.c timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instances base addresses are 0x00, 0x40, 0x80 ...
 * TX instances base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18
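
/*
 * Illustrative sketch (not part of the original driver): how a channel's
 * per-instance register base follows from the two offsets above.  Channels
 * come in RX/TX pairs, so even channel indices map to RX instances
 * (0x00, 0x40, 0x80, ...) and odd ones to TX instances (0x18, 0x58, 0x98, ...).
 * The helper name is made up for the example; td_probe() below performs the
 * same calculation inline when it sets up td_chan->membase.
 */
static inline void __iomem *timbdma_instance_base_sketch(void __iomem *membase,
	int chan_id, bool rx)
{
	return membase + (chan_id / 2) * TIMBDMA_INSTANCE_OFFSET +
		(rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);
}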

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* bytes per row, video specific register
 * which is placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14


#define TIMB_DMA_DESC_SIZE	8

struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[0];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

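/*
 * Note (added for clarity, not in the original source): the arithmetic in
 * tdchantotd() below relies on the layout set up in td_probe(), where
 * struct timb_dma and its channels[] array are allocated in one block, so
 * &td->channels[id] sits sizeof(struct timb_dma) +
 * id * sizeof(struct timb_dma_chan) bytes after td itself.  Stepping back
 * by that amount from td_chan therefore recovers the containing struct
 * timb_dma.
 */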
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
	bool single)
{
	dma_addr_t addr;
	int len;

	addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) |
		dma_desc[4];

	len = (dma_desc[3] << 8) | dma_desc[2];

	if (single)
		dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
	else
		dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
			DMA_TO_DEVICE);
}

static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
{
	struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan,
		struct timb_dma_chan, chan);
	u8 *descs;

	for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) {
		__td_unmap_desc(td_chan, descs, single);
		if (descs[0] & 0x02)
			break;
	}
}

static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */

	return 0;
}
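
/*
 * Worked example (added for illustration, not in the original source): for
 * sg_dma_address(sg) == 0x12345678, sg_dma_len(sg) == 0x0100 and last set,
 * td_fill_desc() above produces the 8-byte hardware descriptor
 *
 *	dma_desc[0..7] = { 0x23, 0x00, 0x00, 0x01, 0x78, 0x56, 0x34, 0x12 }
 *
 * i.e. bytes 4..7 hold the little-endian bus address, bytes 2..3 the
 * little-endian length, and byte 0 the control flags (0x21 = valid transfer
 * element, 0x02 = last element), which is the same layout __td_unmap_desc()
 * decodes.
 */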

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {

		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	dma_async_tx_callback	callback;
	void			*param;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	callback = txd->callback;
	param = txd->callback_param;

	list_move(&td_desc->desc_node, &td_chan->free_list);

	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
		__td_unmap_descs(td_desc,
			txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);

	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	if (callback)
		callback(param);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;
		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}
static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n",
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto out;
	}

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list) {
		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
		goto err;
	}

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;

}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
			desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);
		if (!td_desc) {
			if (i)
				break;
			else {
				dev_err(chan2dev(chan),
					"Couldn't allocate any descriptors\n");
				return -ENOMEM;
			}
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	if (!list_empty(&td_chan->active_list))
		/* transfer ongoing */
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;
		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_MEM_TO_DEV);

	return &td_desc->txd;
}

static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		      unsigned long arg)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
		desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_tasklet(unsigned long data)
{
	struct timb_dma *td = (struct timb_dma *)data;
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++)
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;
			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}


static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (ipr) {
		/* disable interrupts, will be re-enabled in tasklet */
		iowrite32(0, td->membase + TIMBDMA_IER);

		tasklet_schedule(&td->tasklet);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}


static int __devinit td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(sizeof(struct timb_dma) +
		sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}
	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_control = td_control;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;

}

static int __devexit td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}
819 
820 static struct platform_driver td_driver = {
821  .driver = {
822  .name = DRIVER_NAME,
823  .owner = THIS_MODULE,
824  },
825  .probe = td_probe,
826  .remove = __exit_p(td_remove),
827 };
828 
829 module_platform_driver(td_driver);
830 
831 MODULE_LICENSE("GPL v2");
832 MODULE_DESCRIPTION("Timberdale DMA controller driver");
833 MODULE_AUTHOR("Pelagicore AB <[email protected]>");
834 MODULE_ALIAS("platform:"DRIVER_NAME);
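
/*
 * Usage sketch (added for illustration; this would live in a client driver,
 * not in this file): how a peripheral driver would typically obtain one of
 * these channels through the generic dmaengine API and submit a scatterlist
 * transfer.  The filter function, the requested channel index and the
 * function names are assumptions made for the example, and sgl is assumed
 * to be already mapped with dma_map_sg().
 */
static bool td_client_filter_sketch(struct dma_chan *chan, void *param)
{
	/* pick the channel whose index matches the requested one */
	return chan->chan_id == *(int *)param;
}

static int td_client_xfer_sketch(struct scatterlist *sgl, unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int req_chan_id = 1;	/* odd channels are TX (memory to device) */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, td_client_filter_sketch, &req_chan_id);
	if (!chan)
		return -ENODEV;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(desc);		/* ends up in td_tx_submit() */
	dma_async_issue_pending(chan);	/* ends up in td_issue_pending() */

	/* completion callback handling and dma_release_channel() omitted */
	return 0;
}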