Linux Kernel 3.7.1
omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#include <plat/cpu.h>
#include <plat/dma.h>

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;                /* protects the pending list */
        struct tasklet_struct task;     /* runs omap_dma_sched() */
        struct list_head pending;       /* channels waiting to be started */
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;          /* entry on omap_dmadev.pending */

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;
        bool paused;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};

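/*
 * Each descriptor carries one omap_sg entry per scatterlist segment in the
 * sg[] flexible array, so the prep callbacks below size the allocation as
 * kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0])); the total length of a
 * descriptor is the sum of ES * EN * FN bytes over its sg[] entries.
 */
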
static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
        else
                omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

        omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
                d->sync_mode, c->dma_sig, d->sync_type);

        omap_start_dma(c->dma_ch);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_src_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
        else
                omap_set_dma_dest_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

        omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

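/*
 * Residue example with illustrative numbers (not taken from the driver):
 * for a descriptor with two 1024-byte sg entries and a hardware position
 * 256 bytes into sg[0], omap_dma_desc_size_pos() returns
 * (1024 - 256) + 1024 = 1792 bytes still to transfer; a position that
 * falls outside every entry yields 0.
 */
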
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                spin_lock(&d->lock);
                if (list_empty(&c->node))
                        list_add_tail(&c->node, &d->pending);
                spin_unlock(&d->lock);
                tasklet_schedule(&d->task);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
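        /*
         * Worked example with hypothetical values: 4-byte elements
         * (OMAP_DMA_DATA_TYPE_S32) and maxburst = 16 give en = 16 and
         * frame_bytes = 4 * 16 = 64, so a 4096-byte segment is programmed
         * as fn = 4096 / 64 = 64 frames of 16 elements each.
         */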
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

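/*
 * On the client side, a prepared slave transfer is submitted through the
 * generic dmaengine calls; a minimal sketch, assuming "chan" is an OMAP
 * slave channel and "sgl"/"nents" describe an already dma-mapped
 * scatterlist (error handling omitted):
 *
 *      struct dma_async_tx_descriptor *tx;
 *
 *      tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *                                   DMA_PREP_INTERRUPT);
 *      tx->callback = my_done_callback;        // hypothetical completion hook
 *      tx->callback_param = my_data;
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 */
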
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
        void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        if (burst)
                d->sync_mode = OMAP_DMA_SYNC_PACKET;
        else
                d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);

                if (flags & DMA_PREP_INTERRUPT)
                        omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        if (!cpu_class_is_omap1()) {
                omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
                omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
        }

        return vchan_tx_prep(&c->vc, &d->vd, flags);
}

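/*
 * Cyclic example with hypothetical values: an 8192-byte buffer split into
 * 2048-byte periods with 4-byte elements gives en = 2048 / 4 = 512 elements
 * per frame and fn = 8192 / 2048 = 4 frames, so the frame interrupt enabled
 * above fires once per period and drives vchan_cyclic_callback().
 */
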
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                c->desc = NULL;
                /* Avoid stopping the dma twice */
                if (!c->paused)
                        omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                c->paused = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (!c->paused) {
                omap_stop_dma(c->dma_ch);
                c->paused = true;
        }

        return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* Pause/Resume only allowed with cyclic mode */
        if (!c->cyclic)
                return -EINVAL;

        if (c->paused) {
                omap_start_dma(c->dma_ch);
                c->paused = false;
        }

        return 0;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

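/*
 * Client drivers normally reach these commands through the dmaengine
 * wrappers rather than calling device_control directly; a minimal sketch,
 * assuming "chan" was obtained with dma_request_channel() and
 * "fifo_phys_addr" is a hypothetical device FIFO address:
 *
 *      struct dma_slave_config cfg = {
 *              .direction = DMA_DEV_TO_MEM,
 *              .src_addr = fifo_phys_addr,
 *              .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .src_maxburst = 16,
 *      };
 *
 *      dmaengine_slave_config(chan, &cfg);     // DMA_SLAVE_CONFIG
 *      dmaengine_pause(chan);                  // DMA_PAUSE (cyclic only here)
 *      dmaengine_resume(chan);                 // DMA_RESUME
 *      dmaengine_terminate_all(chan);          // DMA_TERMINATE_ALL
 */
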
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
        kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = kzalloc(sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
        } else {
                platform_set_drvdata(pdev, od);
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name  = "omap-dma-engine",
                .owner = THIS_MODULE,
        },
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

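/*
 * A peripheral driver would typically obtain one of these channels through
 * the filter above; a minimal sketch, assuming OMAP24XX_DMA_MMC1_RX (from
 * plat/dma.h) is the request line the client wants:
 *
 *      dma_cap_mask_t mask;
 *      unsigned sig = OMAP24XX_DMA_MMC1_RX;
 *      struct dma_chan *chan;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 */
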
static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
        .name = "omap-dma-engine",
        .id = -1,
        .dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
        int rc = platform_driver_register(&omap_dma_driver);

        if (rc == 0) {
                pdev = platform_device_register_full(&omap_dma_dev_info);
                if (IS_ERR(pdev)) {
                        platform_driver_unregister(&omap_dma_driver);
                        rc = PTR_ERR(pdev);
                }
        }
        return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_device_unregister(pdev);
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");