Linux Kernel  3.7.1
at_hdmac.c
1 /*
2  * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3  *
4  * Copyright (C) 2008 Atmel Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  *
12  * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
13  * The only Atmel DMA Controller that is not covered by this driver is the one
14  * found on AT91SAM9263.
15  */
16 
17 #include <linux/clk.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dmapool.h>
21 #include <linux/interrupt.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 
28 #include "at_hdmac_regs.h"
29 #include "dmaengine.h"
30 
31 /*
32  * Glossary
33  * --------
34  *
35  * at_hdmac : Name of the Atmel AHB DMA Controller
36  * at_dma_ / atdma : Atmel DMA controller entity related
37  * atc_ / atchan : Atmel DMA Channel entity related
38  */
39 
40 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
41 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
42  |ATC_DIF(AT_DMA_MEM_IF))
43 
44 /*
45  * Initial number of descriptors to allocate for each channel. This could
46  * be increased during DMA usage.
47  */
48 static unsigned int init_nr_desc_per_channel = 64;
49 module_param(init_nr_desc_per_channel, uint, 0644);
50 MODULE_PARM_DESC(init_nr_desc_per_channel,
51  "initial descriptors per channel (default: 64)");
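/*
 * Being a 0644 module parameter, the initial pool size can be tuned without
 * rebuilding: e.g. "modprobe at_hdmac init_nr_desc_per_channel=128", or
 * "at_hdmac.init_nr_desc_per_channel=128" on the kernel command line when the
 * driver is built in; the current value is readable under
 * /sys/module/at_hdmac/parameters/. The value 128 is only an illustrative
 * choice; atc_desc_get() falls back to GFP_ATOMIC allocations when the
 * initial pool runs dry.
 */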
52 
53 
54 /* prototypes */
55 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
56 
57 
58 /*----------------------------------------------------------------------*/
59 
60 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
61 {
62  return list_first_entry(&atchan->active_list,
63  struct at_desc, desc_node);
64 }
65 
66 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
67 {
68  return list_first_entry(&atchan->queue,
69  struct at_desc, desc_node);
70 }
71 
82 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
83  gfp_t gfp_flags)
84 {
85  struct at_desc *desc = NULL;
86  struct at_dma *atdma = to_at_dma(chan->device);
87  dma_addr_t phys;
88 
89  desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
90  if (desc) {
91  memset(desc, 0, sizeof(struct at_desc));
92  INIT_LIST_HEAD(&desc->tx_list);
93  dma_async_tx_descriptor_init(&desc->txd, chan);
94  /* txd.flags will be overwritten in prep functions */
95  desc->txd.flags = DMA_CTRL_ACK;
96  desc->txd.tx_submit = atc_tx_submit;
97  desc->txd.phys = phys;
98  }
99 
100  return desc;
101 }
102 
107 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
108 {
109  struct at_desc *desc, *_desc;
110  struct at_desc *ret = NULL;
111  unsigned long flags;
112  unsigned int i = 0;
113  LIST_HEAD(tmp_list);
114 
115  spin_lock_irqsave(&atchan->lock, flags);
116  list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
117  i++;
118  if (async_tx_test_ack(&desc->txd)) {
119  list_del(&desc->desc_node);
120  ret = desc;
121  break;
122  }
123  dev_dbg(chan2dev(&atchan->chan_common),
124  "desc %p not ACKed\n", desc);
125  }
126  spin_unlock_irqrestore(&atchan->lock, flags);
127  dev_vdbg(chan2dev(&atchan->chan_common),
128  "scanned %u descriptors on freelist\n", i);
129 
130  /* no more descriptors available in the initial pool: create one more */
131  if (!ret) {
132  ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
133  if (ret) {
134  spin_lock_irqsave(&atchan->lock, flags);
135  atchan->descs_allocated++;
136  spin_unlock_irqrestore(&atchan->lock, flags);
137  } else {
138  dev_err(chan2dev(&atchan->chan_common),
139  "not enough descriptors available\n");
140  }
141  }
142 
143  return ret;
144 }
145 
151 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
152 {
153  if (desc) {
154  struct at_desc *child;
155  unsigned long flags;
156 
157  spin_lock_irqsave(&atchan->lock, flags);
158  list_for_each_entry(child, &desc->tx_list, desc_node)
159  dev_vdbg(chan2dev(&atchan->chan_common),
160  "moving child desc %p to freelist\n",
161  child);
162  list_splice_init(&desc->tx_list, &atchan->free_list);
163  dev_vdbg(chan2dev(&atchan->chan_common),
164  "moving desc %p to freelist\n", desc);
165  list_add(&desc->desc_node, &atchan->free_list);
166  spin_unlock_irqrestore(&atchan->lock, flags);
167  }
168 }
169 
178 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
179  struct at_desc *desc)
180 {
181  if (!(*first)) {
182  *first = desc;
183  } else {
184  /* inform the HW lli about chaining */
185  (*prev)->lli.dscr = desc->txd.phys;
186  /* insert the link descriptor to the LD ring */
187  list_add_tail(&desc->desc_node,
188  &(*first)->tx_list);
189  }
190  *prev = desc;
191 }
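/*
 * atc_desc_chain() maintains two parallel views of one transfer. In
 * software, every descriptor after the first is kept on first->tx_list; in
 * hardware, each lli.dscr field holds the bus address of the next
 * linked-list item, so the controller can walk the chain by itself:
 *
 *	first --lli.dscr--> desc2 --lli.dscr--> desc3 --lli.dscr--> ...
 *	  \-- tx_list: desc2, desc3, ...
 *
 * The chain is terminated later by set_desc_eol() on its last descriptor
 * (or looped back onto itself for cyclic transfers).
 */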
192 
200 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
201 {
202  struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
203 
204  /* ASSERT: channel is idle */
205  if (atc_chan_is_enabled(atchan)) {
206  dev_err(chan2dev(&atchan->chan_common),
207  "BUG: Attempted to start non-idle channel\n");
208  dev_err(chan2dev(&atchan->chan_common),
209  " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
210  channel_readl(atchan, SADDR),
211  channel_readl(atchan, DADDR),
212  channel_readl(atchan, CTRLA),
213  channel_readl(atchan, CTRLB),
214  channel_readl(atchan, DSCR));
215 
216  /* The tasklet will hopefully advance the queue... */
217  return;
218  }
219 
220  vdbg_dump_regs(atchan);
221 
222  channel_writel(atchan, SADDR, 0);
223  channel_writel(atchan, DADDR, 0);
224  channel_writel(atchan, CTRLA, 0);
225  channel_writel(atchan, CTRLB, 0);
226  channel_writel(atchan, DSCR, first->txd.phys);
227  dma_writel(atdma, CHER, atchan->mask);
228 
229  vdbg_dump_regs(atchan);
230 }
231 
238 static void
239 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
240 {
241  struct dma_async_tx_descriptor *txd = &desc->txd;
242 
243  dev_vdbg(chan2dev(&atchan->chan_common),
244  "descriptor %u complete\n", txd->cookie);
245 
246  /* mark the descriptor as complete for non-cyclic cases only */
247  if (!atc_chan_is_cyclic(atchan))
248  dma_cookie_complete(txd);
249 
250  /* move children to free_list */
251  list_splice_init(&desc->tx_list, &atchan->free_list);
252  /* move myself to free_list */
253  list_move(&desc->desc_node, &atchan->free_list);
254 
255  /* unmap dma addresses (not on slave channels) */
256  if (!atchan->chan_common.private) {
257  struct device *parent = chan2parent(&atchan->chan_common);
258  if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
259  if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
260  dma_unmap_single(parent,
261  desc->lli.daddr,
262  desc->len, DMA_FROM_DEVICE);
263  else
264  dma_unmap_page(parent,
265  desc->lli.daddr,
266  desc->len, DMA_FROM_DEVICE);
267  }
268  if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
269  if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
270  dma_unmap_single(parent,
271  desc->lli.saddr,
272  desc->len, DMA_TO_DEVICE);
273  else
274  dma_unmap_page(parent,
275  desc->lli.saddr,
276  desc->len, DMA_TO_DEVICE);
277  }
278  }
279 
280  /* for cyclic transfers,
281  * there is no need to replay the callback function while stopping */
282  if (!atc_chan_is_cyclic(atchan)) {
283  dma_async_tx_callback callback = txd->callback;
284  void *param = txd->callback_param;
285 
286  /*
287  * The API requires that no submissions are done from a
288  * callback, so we don't need to drop the lock here
289  */
290  if (callback)
291  callback(param);
292  }
293 
294  dma_run_dependencies(txd);
295 }
296 
306 static void atc_complete_all(struct at_dma_chan *atchan)
307 {
308  struct at_desc *desc, *_desc;
309  LIST_HEAD(list);
310 
311  dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
312 
313  BUG_ON(atc_chan_is_enabled(atchan));
314 
315  /*
316  * Submit queued descriptors ASAP, i.e. before we go through
317  * the completed ones.
318  */
319  if (!list_empty(&atchan->queue))
320  atc_dostart(atchan, atc_first_queued(atchan));
321  /* empty active_list now it is completed */
322  list_splice_init(&atchan->active_list, &list);
323  /* empty queue list by moving descriptors (if any) to active_list */
324  list_splice_init(&atchan->queue, &atchan->active_list);
325 
326  list_for_each_entry_safe(desc, _desc, &list, desc_node)
327  atc_chain_complete(atchan, desc);
328 }
329 
336 static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
337 {
338  struct at_desc *desc, *_desc;
339  struct at_desc *child;
340 
341  dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
342 
343  list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
344  if (!(desc->lli.ctrla & ATC_DONE))
345  /* This one is currently in progress */
346  return;
347 
348  list_for_each_entry(child, &desc->tx_list, desc_node)
349  if (!(child->lli.ctrla & ATC_DONE))
350  /* Currently in progress */
351  return;
352 
353  /*
354  * No descriptors so far seem to be in progress, i.e.
355  * this chain must be done.
356  */
357  atc_chain_complete(atchan, desc);
358  }
359 }
360 
367 static void atc_advance_work(struct at_dma_chan *atchan)
368 {
369  dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
370 
371  if (list_empty(&atchan->active_list) ||
372  list_is_singular(&atchan->active_list)) {
373  atc_complete_all(atchan);
374  } else {
375  atc_chain_complete(atchan, atc_first_active(atchan));
376  /* advance work */
377  atc_dostart(atchan, atc_first_active(atchan));
378  }
379 }
380 
381 
388 static void atc_handle_error(struct at_dma_chan *atchan)
389 {
390  struct at_desc *bad_desc;
391  struct at_desc *child;
392 
393  /*
394  * The descriptor currently at the head of the active list is
395  * broken. Since we don't have any way to report errors, we'll
396  * just have to scream loudly and try to carry on.
397  */
398  bad_desc = atc_first_active(atchan);
399  list_del_init(&bad_desc->desc_node);
400 
401  /* As we are stopped, take advantage of this to push queued
402  * descriptors into the active_list */
403  list_splice_init(&atchan->queue, atchan->active_list.prev);
404 
405  /* Try to restart the controller */
406  if (!list_empty(&atchan->active_list))
407  atc_dostart(atchan, atc_first_active(atchan));
408 
409  /*
410  * KERN_CRITICAL may seem harsh, but since this only happens
411  * when someone submits a bad physical address in a
412  * descriptor, we should consider ourselves lucky that the
413  * controller flagged an error instead of scribbling over
414  * random memory locations.
415  */
416  dev_crit(chan2dev(&atchan->chan_common),
417  "Bad descriptor submitted for DMA!\n");
418  dev_crit(chan2dev(&atchan->chan_common),
419  " cookie: %d\n", bad_desc->txd.cookie);
420  atc_dump_lli(atchan, &bad_desc->lli);
421  list_for_each_entry(child, &bad_desc->tx_list, desc_node)
422  atc_dump_lli(atchan, &child->lli);
423 
424  /* Pretend the descriptor completed successfully */
425  atc_chain_complete(atchan, bad_desc);
426 }
427 
434 static void atc_handle_cyclic(struct at_dma_chan *atchan)
435 {
436  struct at_desc *first = atc_first_active(atchan);
437  struct dma_async_tx_descriptor *txd = &first->txd;
438  dma_async_tx_callback callback = txd->callback;
439  void *param = txd->callback_param;
440 
441  dev_vdbg(chan2dev(&atchan->chan_common),
442  "new cyclic period llp 0x%08x\n",
443  channel_readl(atchan, DSCR));
444 
445  if (callback)
446  callback(param);
447 }
448 
449 /*-- IRQ & Tasklet ---------------------------------------------------*/
450 
451 static void atc_tasklet(unsigned long data)
452 {
453  struct at_dma_chan *atchan = (struct at_dma_chan *)data;
454  unsigned long flags;
455 
456  spin_lock_irqsave(&atchan->lock, flags);
457  if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
458  atc_handle_error(atchan);
459  else if (atc_chan_is_cyclic(atchan))
460  atc_handle_cyclic(atchan);
461  else
462  atc_advance_work(atchan);
463 
464  spin_unlock_irqrestore(&atchan->lock, flags);
465 }
466 
467 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
468 {
469  struct at_dma *atdma = (struct at_dma *)dev_id;
470  struct at_dma_chan *atchan;
471  int i;
472  u32 status, pending, imr;
473  int ret = IRQ_NONE;
474 
475  do {
476  imr = dma_readl(atdma, EBCIMR);
477  status = dma_readl(atdma, EBCISR);
478  pending = status & imr;
479 
480  if (!pending)
481  break;
482 
483  dev_vdbg(atdma->dma_common.dev,
484  "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
485  status, imr, pending);
486 
487  for (i = 0; i < atdma->dma_common.chancnt; i++) {
488  atchan = &atdma->chan[i];
489  if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
490  if (pending & AT_DMA_ERR(i)) {
491  /* Disable channel on AHB error */
492  dma_writel(atdma, CHDR,
493  AT_DMA_RES(i) | atchan->mask);
494  /* Give information to tasklet */
495  set_bit(ATC_IS_ERROR, &atchan->status);
496  }
497  tasklet_schedule(&atchan->tasklet);
498  ret = IRQ_HANDLED;
499  }
500  }
501 
502  } while (pending);
503 
504  return ret;
505 }
506 
507 
508 /*-- DMA Engine API --------------------------------------------------*/
509 
518 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
519 {
520  struct at_desc *desc = txd_to_at_desc(tx);
521  struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
522  dma_cookie_t cookie;
523  unsigned long flags;
524 
525  spin_lock_irqsave(&atchan->lock, flags);
526  cookie = dma_cookie_assign(tx);
527 
528  if (list_empty(&atchan->active_list)) {
529  dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
530  desc->txd.cookie);
531  atc_dostart(atchan, desc);
532  list_add_tail(&desc->desc_node, &atchan->active_list);
533  } else {
534  dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
535  desc->txd.cookie);
536  list_add_tail(&desc->desc_node, &atchan->queue);
537  }
538 
539  spin_unlock_irqrestore(&atchan->lock, flags);
540 
541  return cookie;
542 }
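/*
 * A minimal client-side sketch (illustrative only, not part of this driver)
 * of how the submit path above is normally reached: the descriptor returned
 * by a prep routine is handed back through dmaengine_submit(), which calls
 * atc_tx_submit(). The transfer starts immediately only if the channel is
 * idle; otherwise it sits on atchan->queue until the completion tasklet or
 * atc_issue_pending() advances the work. "txd", "chan", my_done_cb and
 * my_ctx are placeholder names:
 *
 *	dma_cookie_t cookie;
 *
 *	txd->callback = my_done_cb;
 *	txd->callback_param = my_ctx;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */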
543 
552 static struct dma_async_tx_descriptor *
553 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
554  size_t len, unsigned long flags)
555 {
556  struct at_dma_chan *atchan = to_at_dma_chan(chan);
557  struct at_desc *desc = NULL;
558  struct at_desc *first = NULL;
559  struct at_desc *prev = NULL;
560  size_t xfer_count;
561  size_t offset;
562  unsigned int src_width;
563  unsigned int dst_width;
564  u32 ctrla;
565  u32 ctrlb;
566 
567  dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
568  dest, src, len, flags);
569 
570  if (unlikely(!len)) {
571  dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
572  return NULL;
573  }
574 
575  ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
576  | ATC_SRC_ADDR_MODE_INCR
577  | ATC_DST_ADDR_MODE_INCR
578  | ATC_FC_MEM2MEM;
579 
580  /*
581  * We can be a lot more clever here, but this should take care
582  * of the most common optimization.
583  */
584  if (!((src | dest | len) & 3)) {
585  ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
586  src_width = dst_width = 2;
587  } else if (!((src | dest | len) & 1)) {
588  ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
589  src_width = dst_width = 1;
590  } else {
591  ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
592  src_width = dst_width = 0;
593  }
594 
595  for (offset = 0; offset < len; offset += xfer_count << src_width) {
596  xfer_count = min_t(size_t, (len - offset) >> src_width,
597  ATC_BTSIZE_MAX);
598 
599  desc = atc_desc_get(atchan);
600  if (!desc)
601  goto err_desc_get;
602 
603  desc->lli.saddr = src + offset;
604  desc->lli.daddr = dest + offset;
605  desc->lli.ctrla = ctrla | xfer_count;
606  desc->lli.ctrlb = ctrlb;
607 
608  desc->txd.cookie = 0;
609 
610  atc_desc_chain(&first, &prev, desc);
611  }
612 
613  /* First descriptor of the chain embeds additional information */
614  first->txd.cookie = -EBUSY;
615  first->len = len;
616 
617  /* set end-of-link on the last link descriptor of the list */
618  set_desc_eol(desc);
619 
620  first->txd.flags = flags; /* client is in control of this ack */
621 
622  return &first->txd;
623 
624 err_desc_get:
625  atc_desc_put(atchan, first);
626  return NULL;
627 }
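/*
 * A hedged sketch of a memcpy client of the prep routine above, assuming the
 * channel was obtained with dma_request_channel() and a DMA_MEMCPY capability
 * mask. The device operation is called directly to match the signature shown
 * here; dst_phys, src_phys, len and my_memcpy_done are illustrative names:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst_phys, src_phys,
 *			len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_memcpy_done;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Completion can then be signalled through the callback or polled with
 * dma_async_is_tx_complete(chan, cookie, NULL, NULL).
 */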
628 
629 
639 static struct dma_async_tx_descriptor *
640 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
641  unsigned int sg_len, enum dma_transfer_direction direction,
642  unsigned long flags, void *context)
643 {
644  struct at_dma_chan *atchan = to_at_dma_chan(chan);
645  struct at_dma_slave *atslave = chan->private;
646  struct dma_slave_config *sconfig = &atchan->dma_sconfig;
647  struct at_desc *first = NULL;
648  struct at_desc *prev = NULL;
649  u32 ctrla;
650  u32 ctrlb;
651  dma_addr_t reg;
652  unsigned int reg_width;
653  unsigned int mem_width;
654  unsigned int i;
655  struct scatterlist *sg;
656  size_t total_len = 0;
657 
658  dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
659  sg_len,
660  direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
661  flags);
662 
663  if (unlikely(!atslave || !sg_len)) {
664  dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
665  return NULL;
666  }
667 
668  ctrla = ATC_SCSIZE(sconfig->src_maxburst)
669  | ATC_DCSIZE(sconfig->dst_maxburst);
670  ctrlb = ATC_IEN;
671 
672  switch (direction) {
673  case DMA_MEM_TO_DEV:
674  reg_width = convert_buswidth(sconfig->dst_addr_width);
675  ctrla |= ATC_DST_WIDTH(reg_width);
676  ctrlb |= ATC_DST_ADDR_MODE_FIXED
677  | ATC_SRC_ADDR_MODE_INCR
678  | ATC_FC_MEM2PER
679  | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
680  reg = sconfig->dst_addr;
681  for_each_sg(sgl, sg, sg_len, i) {
682  struct at_desc *desc;
683  u32 len;
684  u32 mem;
685 
686  desc = atc_desc_get(atchan);
687  if (!desc)
688  goto err_desc_get;
689 
690  mem = sg_dma_address(sg);
691  len = sg_dma_len(sg);
692  if (unlikely(!len)) {
693  dev_dbg(chan2dev(chan),
694  "prep_slave_sg: sg(%d) data length is zero\n", i);
695  goto err;
696  }
697  mem_width = 2;
698  if (unlikely(mem & 3 || len & 3))
699  mem_width = 0;
700 
701  desc->lli.saddr = mem;
702  desc->lli.daddr = reg;
703  desc->lli.ctrla = ctrla
704  | ATC_SRC_WIDTH(mem_width)
705  | len >> mem_width;
706  desc->lli.ctrlb = ctrlb;
707 
708  atc_desc_chain(&first, &prev, desc);
709  total_len += len;
710  }
711  break;
712  case DMA_DEV_TO_MEM:
713  reg_width = convert_buswidth(sconfig->src_addr_width);
714  ctrla |= ATC_SRC_WIDTH(reg_width);
715  ctrlb |= ATC_DST_ADDR_MODE_INCR
716  | ATC_SRC_ADDR_MODE_FIXED
717  | ATC_FC_PER2MEM
718  | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
719 
720  reg = sconfig->src_addr;
721  for_each_sg(sgl, sg, sg_len, i) {
722  struct at_desc *desc;
723  u32 len;
724  u32 mem;
725 
726  desc = atc_desc_get(atchan);
727  if (!desc)
728  goto err_desc_get;
729 
730  mem = sg_dma_address(sg);
731  len = sg_dma_len(sg);
732  if (unlikely(!len)) {
733  dev_dbg(chan2dev(chan),
734  "prep_slave_sg: sg(%d) data length is zero\n", i);
735  goto err;
736  }
737  mem_width = 2;
738  if (unlikely(mem & 3 || len & 3))
739  mem_width = 0;
740 
741  desc->lli.saddr = reg;
742  desc->lli.daddr = mem;
743  desc->lli.ctrla = ctrla
744  | ATC_DST_WIDTH(mem_width)
745  | len >> reg_width;
746  desc->lli.ctrlb = ctrlb;
747 
748  atc_desc_chain(&first, &prev, desc);
749  total_len += len;
750  }
751  break;
752  default:
753  return NULL;
754  }
755 
756  /* set end-of-link on the last link descriptor of the list */
757  set_desc_eol(prev);
758 
759  /* First descriptor of the chain embeds additional information */
760  first->txd.cookie = -EBUSY;
761  first->len = total_len;
762 
763  /* first link descriptor of the list is responsible for the flags */
764  first->txd.flags = flags; /* client is in control of this ack */
765 
766  return &first->txd;
767 
768 err_desc_get:
769  dev_err(chan2dev(chan), "not enough descriptors available\n");
770 err:
771  atc_desc_put(atchan, first);
772  return NULL;
773 }
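/*
 * A hedged sketch of a slave (peripheral) client of the prep routine above,
 * assuming the channel was requested with a filter that attached an
 * at_dma_slave to chan->private and that a dma_slave_config was already set
 * (see set_runtime_config() below). "dev", "sg" and "nents" are placeholders:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int mapped;
 *
 *	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	txd = chan->device->device_prep_slave_sg(chan, sg, mapped,
 *			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK,
 *			NULL);
 *	if (!txd)
 *		return -ENOMEM;	// remember to dma_unmap_sg() on failure
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */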
774 
779 static int
780 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
781  size_t period_len, enum dma_transfer_direction direction)
782 {
783  if (period_len > (ATC_BTSIZE_MAX << reg_width))
784  goto err_out;
785  if (unlikely(period_len & ((1 << reg_width) - 1)))
786  goto err_out;
787  if (unlikely(buf_addr & ((1 << reg_width) - 1)))
788  goto err_out;
789  if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
790  goto err_out;
791 
792  return 0;
793 
794 err_out:
795  return -EINVAL;
796 }
797 
801 static int
802 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
803  unsigned int period_index, dma_addr_t buf_addr,
804  unsigned int reg_width, size_t period_len,
805  enum dma_transfer_direction direction)
806 {
807  struct at_dma_chan *atchan = to_at_dma_chan(chan);
808  struct dma_slave_config *sconfig = &atchan->dma_sconfig;
809  u32 ctrla;
810 
811  /* prepare common CTRLA value */
812  ctrla = ATC_SCSIZE(sconfig->src_maxburst)
813  | ATC_DCSIZE(sconfig->dst_maxburst)
814  | ATC_DST_WIDTH(reg_width)
815  | ATC_SRC_WIDTH(reg_width)
816  | period_len >> reg_width;
817 
818  switch (direction) {
819  case DMA_MEM_TO_DEV:
820  desc->lli.saddr = buf_addr + (period_len * period_index);
821  desc->lli.daddr = sconfig->dst_addr;
822  desc->lli.ctrla = ctrla;
823  desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
824  | ATC_SRC_ADDR_MODE_INCR
825  | ATC_FC_MEM2PER
826  | ATC_SIF(AT_DMA_MEM_IF)
827  | ATC_DIF(AT_DMA_PER_IF);
828  break;
829 
830  case DMA_DEV_TO_MEM:
831  desc->lli.saddr = sconfig->src_addr;
832  desc->lli.daddr = buf_addr + (period_len * period_index);
833  desc->lli.ctrla = ctrla;
834  desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
835  | ATC_SRC_ADDR_MODE_FIXED
836  | ATC_FC_PER2MEM
837  | ATC_SIF(AT_DMA_PER_IF)
838  | ATC_DIF(AT_DMA_MEM_IF);
839  break;
840 
841  default:
842  return -EINVAL;
843  }
844 
845  return 0;
846 }
847 
858 static struct dma_async_tx_descriptor *
859 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
860  size_t period_len, enum dma_transfer_direction direction,
861  unsigned long flags, void *context)
862 {
863  struct at_dma_chan *atchan = to_at_dma_chan(chan);
864  struct at_dma_slave *atslave = chan->private;
865  struct dma_slave_config *sconfig = &atchan->dma_sconfig;
866  struct at_desc *first = NULL;
867  struct at_desc *prev = NULL;
868  unsigned long was_cyclic;
869  unsigned int reg_width;
870  unsigned int periods = buf_len / period_len;
871  unsigned int i;
872 
873  dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
874  direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
875  buf_addr,
876  periods, buf_len, period_len);
877 
878  if (unlikely(!atslave || !buf_len || !period_len)) {
879  dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
880  return NULL;
881  }
882 
883  was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
884  if (was_cyclic) {
885  dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
886  return NULL;
887  }
888 
889  if (sconfig->direction == DMA_MEM_TO_DEV)
890  reg_width = convert_buswidth(sconfig->dst_addr_width);
891  else
892  reg_width = convert_buswidth(sconfig->src_addr_width);
893 
894  /* Check for too big/unaligned periods and unaligned DMA buffer */
895  if (atc_dma_cyclic_check_values(reg_width, buf_addr,
896  period_len, direction))
897  goto err_out;
898 
899  /* build cyclic linked list */
900  for (i = 0; i < periods; i++) {
901  struct at_desc *desc;
902 
903  desc = atc_desc_get(atchan);
904  if (!desc)
905  goto err_desc_get;
906 
907  if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
908  reg_width, period_len, direction))
909  goto err_desc_get;
910 
911  atc_desc_chain(&first, &prev, desc);
912  }
913 
914  /* let's make the list cyclic */
915  prev->lli.dscr = first->txd.phys;
916 
917  /* First descriptor of the chain embeds additional information */
918  first->txd.cookie = -EBUSY;
919  first->len = buf_len;
920 
921  return &first->txd;
922 
923 err_desc_get:
924  dev_err(chan2dev(chan), "not enough descriptors available\n");
925  atc_desc_put(atchan, first);
926 err_out:
927  clear_bit(ATC_IS_CYCLIC, &atchan->status);
928  return NULL;
929 }
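/*
 * A hedged sketch of a cyclic client (e.g. an audio ring buffer) of the
 * routine above, calling the device operation directly to match its
 * signature; buf_phys, buf_len, period_len, my_period_cb and my_ctx are
 * illustrative. The callback fires once per completed period from the
 * tasklet, and the ring keeps running until the channel is terminated:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf_phys, buf_len,
 *			period_len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_cb;
 *	txd->callback_param = my_ctx;
 *	dmaengine_submit(txd);
 */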
930 
931 static int set_runtime_config(struct dma_chan *chan,
932  struct dma_slave_config *sconfig)
933 {
934  struct at_dma_chan *atchan = to_at_dma_chan(chan);
935 
936  /* Check if this channel is configured for slave transfers */
937  if (!chan->private)
938  return -EINVAL;
939 
940  memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
941 
942  convert_burst(&atchan->dma_sconfig.src_maxburst);
943  convert_burst(&atchan->dma_sconfig.dst_maxburst);
944 
945  return 0;
946 }
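/*
 * A hedged sketch of the dma_slave_config a peripheral driver would pass
 * down to set_runtime_config() through dmaengine_slave_config(). The FIFO
 * address and widths are illustrative; the maxburst values are given in
 * transfer units and converted to controller chunk sizes by convert_burst():
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= my_fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *		.dst_maxburst	= 4,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		dev_err(dev, "DMA slave configuration failed\n");
 */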
947 
948 
949 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
950  unsigned long arg)
951 {
952  struct at_dma_chan *atchan = to_at_dma_chan(chan);
953  struct at_dma *atdma = to_at_dma(chan->device);
954  int chan_id = atchan->chan_common.chan_id;
955  unsigned long flags;
956 
957  LIST_HEAD(list);
958 
959  dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
960 
961  if (cmd == DMA_PAUSE) {
962  spin_lock_irqsave(&atchan->lock, flags);
963 
964  dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
965  set_bit(ATC_IS_PAUSED, &atchan->status);
966 
967  spin_unlock_irqrestore(&atchan->lock, flags);
968  } else if (cmd == DMA_RESUME) {
969  if (!atc_chan_is_paused(atchan))
970  return 0;
971 
972  spin_lock_irqsave(&atchan->lock, flags);
973 
974  dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
975  clear_bit(ATC_IS_PAUSED, &atchan->status);
976 
977  spin_unlock_irqrestore(&atchan->lock, flags);
978  } else if (cmd == DMA_TERMINATE_ALL) {
979  struct at_desc *desc, *_desc;
980  /*
981  * This is only called when something went wrong elsewhere, so
982  * we don't really care about the data. Just disable the
983  * channel. We still have to poll the channel enable bit due
984  * to AHB/HSB limitations.
985  */
986  spin_lock_irqsave(&atchan->lock, flags);
987 
988  /* disabling channel: must also remove suspend state */
989  dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
990 
991  /* confirm that this channel is disabled */
992  while (dma_readl(atdma, CHSR) & atchan->mask)
993  cpu_relax();
994 
995  /* active_list entries will end up before queued entries */
996  list_splice_init(&atchan->queue, &list);
997  list_splice_init(&atchan->active_list, &list);
998 
999  /* Flush all pending and queued descriptors */
1000  list_for_each_entry_safe(desc, _desc, &list, desc_node)
1001  atc_chain_complete(atchan, desc);
1002 
1003  clear_bit(ATC_IS_PAUSED, &atchan->status);
1004  /* if channel dedicated to cyclic operations, free it */
1005  clear_bit(ATC_IS_CYCLIC, &atchan->status);
1006 
1007  spin_unlock_irqrestore(&atchan->lock, flags);
1008  } else if (cmd == DMA_SLAVE_CONFIG) {
1009  return set_runtime_config(chan, (struct dma_slave_config *)arg);
1010  } else {
1011  return -ENXIO;
1012  }
1013 
1014  return 0;
1015 }
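/*
 * The control operations above are normally reached through the generic
 * dmaengine wrappers; a brief sketch, assuming "chan" is a channel on this
 * controller:
 *
 *	dmaengine_pause(chan);		// DMA_PAUSE: set the suspend bit
 *	dmaengine_resume(chan);		// DMA_RESUME: clear the suspend bit
 *	dmaengine_terminate_all(chan);	// DMA_TERMINATE_ALL: drop all work
 *
 * Terminating flushes both the active list and the queue through
 * atc_chain_complete(), so any client state tied to outstanding cookies has
 * to be discarded as well; pause/resume only toggle the controller's
 * per-channel suspend state.
 */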
1016 
1027 static enum dma_status
1028 atc_tx_status(struct dma_chan *chan,
1029  dma_cookie_t cookie,
1030  struct dma_tx_state *txstate)
1031 {
1032  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1033  dma_cookie_t last_used;
1034  dma_cookie_t last_complete;
1035  unsigned long flags;
1036  enum dma_status ret;
1037 
1038  spin_lock_irqsave(&atchan->lock, flags);
1039 
1040  ret = dma_cookie_status(chan, cookie, txstate);
1041  if (ret != DMA_SUCCESS) {
1042  atc_cleanup_descriptors(atchan);
1043 
1044  ret = dma_cookie_status(chan, cookie, txstate);
1045  }
1046 
1047  last_complete = chan->completed_cookie;
1048  last_used = chan->cookie;
1049 
1050  spin_unlock_irqrestore(&atchan->lock, flags);
1051 
1052  if (ret != DMA_SUCCESS)
1053  dma_set_residue(txstate, atc_first_active(atchan)->len);
1054 
1055  if (atc_chan_is_paused(atchan))
1056  ret = DMA_PAUSED;
1057 
1058  dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
1059  ret, cookie, last_complete ? last_complete : 0,
1060  last_used ? last_used : 0);
1061 
1062  return ret;
1063 }
1064 
1069 static void atc_issue_pending(struct dma_chan *chan)
1070 {
1071  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1072  unsigned long flags;
1073 
1074  dev_vdbg(chan2dev(chan), "issue_pending\n");
1075 
1076  /* Not needed for cyclic transfers */
1077  if (atc_chan_is_cyclic(atchan))
1078  return;
1079 
1080  spin_lock_irqsave(&atchan->lock, flags);
1081  if (!atc_chan_is_enabled(atchan)) {
1082  atc_advance_work(atchan);
1083  }
1084  spin_unlock_irqrestore(&atchan->lock, flags);
1085 }
1086 
1094 static int atc_alloc_chan_resources(struct dma_chan *chan)
1095 {
1096  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1097  struct at_dma *atdma = to_at_dma(chan->device);
1098  struct at_desc *desc;
1099  struct at_dma_slave *atslave;
1100  unsigned long flags;
1101  int i;
1102  u32 cfg;
1103  LIST_HEAD(tmp_list);
1104 
1105  dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1106 
1107  /* ASSERT: channel is idle */
1108  if (atc_chan_is_enabled(atchan)) {
1109  dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1110  return -EIO;
1111  }
1112 
1113  cfg = ATC_DEFAULT_CFG;
1114 
1115  atslave = chan->private;
1116  if (atslave) {
1117  /*
1118  * We need controller-specific data to set up slave
1119  * transfers.
1120  */
1121  BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1122 
1123  /* if a cfg configuration is specified, take it instead of the default */
1124  if (atslave->cfg)
1125  cfg = atslave->cfg;
1126  }
1127 
1128  /* have we already been set up?
1129  * reconfigure channel but no need to reallocate descriptors */
1130  if (!list_empty(&atchan->free_list))
1131  return atchan->descs_allocated;
1132 
1133  /* Allocate initial pool of descriptors */
1134  for (i = 0; i < init_nr_desc_per_channel; i++) {
1135  desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1136  if (!desc) {
1137  dev_err(atdma->dma_common.dev,
1138  "Only %d initial descriptors\n", i);
1139  break;
1140  }
1141  list_add_tail(&desc->desc_node, &tmp_list);
1142  }
1143 
1144  spin_lock_irqsave(&atchan->lock, flags);
1145  atchan->descs_allocated = i;
1146  list_splice(&tmp_list, &atchan->free_list);
1147  dma_cookie_init(chan);
1148  spin_unlock_irqrestore(&atchan->lock, flags);
1149 
1150  /* channel parameters */
1151  channel_writel(atchan, CFG, cfg);
1152 
1153  dev_dbg(chan2dev(chan),
1154  "alloc_chan_resources: allocated %d descriptors\n",
1155  atchan->descs_allocated);
1156 
1157  return atchan->descs_allocated;
1158 }
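/*
 * A hedged sketch of how a slave client typically satisfies the
 * chan->private expectations above: the at_dma_slave is handed over through
 * a dma_request_channel() filter function. my_filter and my_atslave are
 * illustrative names:
 *
 *	static bool my_filter(struct dma_chan *chan, void *slave)
 *	{
 *		struct at_dma_slave *sl = slave;
 *
 *		if (sl && sl->dma_dev == chan->device->dev) {
 *			chan->private = sl;
 *			return true;
 *		}
 *		return false;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &my_atslave);
 */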
1159 
1164 static void atc_free_chan_resources(struct dma_chan *chan)
1165 {
1166  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1167  struct at_dma *atdma = to_at_dma(chan->device);
1168  struct at_desc *desc, *_desc;
1169  LIST_HEAD(list);
1170 
1171  dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1172  atchan->descs_allocated);
1173 
1174  /* ASSERT: channel is idle */
1175  BUG_ON(!list_empty(&atchan->active_list));
1176  BUG_ON(!list_empty(&atchan->queue));
1177  BUG_ON(atc_chan_is_enabled(atchan));
1178 
1179  list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1180  dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1181  list_del(&desc->desc_node);
1182  /* free link descriptor */
1183  dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1184  }
1185  list_splice_init(&atchan->free_list, &list);
1186  atchan->descs_allocated = 0;
1187  atchan->status = 0;
1188 
1189  dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1190 }
1191 
1192 
1193 /*-- Module Management -----------------------------------------------*/
1194 
1195 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1196 static struct at_dma_platform_data at91sam9rl_config = {
1197  .nr_channels = 2,
1198 };
1199 static struct at_dma_platform_data at91sam9g45_config = {
1200  .nr_channels = 8,
1201 };
1202 
1203 #if defined(CONFIG_OF)
1204 static const struct of_device_id atmel_dma_dt_ids[] = {
1205  {
1206  .compatible = "atmel,at91sam9rl-dma",
1207  .data = &at91sam9rl_config,
1208  }, {
1209  .compatible = "atmel,at91sam9g45-dma",
1210  .data = &at91sam9g45_config,
1211  }, {
1212  /* sentinel */
1213  }
1214 };
1215 
1216 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1217 #endif
1218 
1219 static const struct platform_device_id atdma_devtypes[] = {
1220  {
1221  .name = "at91sam9rl_dma",
1222  .driver_data = (unsigned long) &at91sam9rl_config,
1223  }, {
1224  .name = "at91sam9g45_dma",
1225  .driver_data = (unsigned long) &at91sam9g45_config,
1226  }, {
1227  /* sentinel */
1228  }
1229 };
1230 
1231 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1232  struct platform_device *pdev)
1233 {
1234  if (pdev->dev.of_node) {
1235  const struct of_device_id *match;
1236  match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1237  if (match == NULL)
1238  return NULL;
1239  return match->data;
1240  }
1241  return (struct at_dma_platform_data *)
1242  platform_get_device_id(pdev)->driver_data;
1243 }
1244 
1249 static void at_dma_off(struct at_dma *atdma)
1250 {
1251  dma_writel(atdma, EN, 0);
1252 
1253  /* disable all interrupts */
1254  dma_writel(atdma, EBCIDR, -1L);
1255 
1256  /* confirm that all channels are disabled */
1257  while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1258  cpu_relax();
1259 }
1260 
1261 static int __init at_dma_probe(struct platform_device *pdev)
1262 {
1263  struct resource *io;
1264  struct at_dma *atdma;
1265  size_t size;
1266  int irq;
1267  int err;
1268  int i;
1269  const struct at_dma_platform_data *plat_dat;
1270 
1271  /* setup platform data for each SoC */
1272  dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1273  dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1274  dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1275 
1276  /* get DMA parameters from controller type */
1277  plat_dat = at_dma_get_driver_data(pdev);
1278  if (!plat_dat)
1279  return -ENODEV;
1280 
1281  io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1282  if (!io)
1283  return -EINVAL;
1284 
1285  irq = platform_get_irq(pdev, 0);
1286  if (irq < 0)
1287  return irq;
1288 
1289  size = sizeof(struct at_dma);
1290  size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1291  atdma = kzalloc(size, GFP_KERNEL);
1292  if (!atdma)
1293  return -ENOMEM;
1294 
1295  /* discover transaction capabilities */
1296  atdma->dma_common.cap_mask = plat_dat->cap_mask;
1297  atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1298 
1299  size = resource_size(io);
1300  if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1301  err = -EBUSY;
1302  goto err_kfree;
1303  }
1304 
1305  atdma->regs = ioremap(io->start, size);
1306  if (!atdma->regs) {
1307  err = -ENOMEM;
1308  goto err_release_r;
1309  }
1310 
1311  atdma->clk = clk_get(&pdev->dev, "dma_clk");
1312  if (IS_ERR(atdma->clk)) {
1313  err = PTR_ERR(atdma->clk);
1314  goto err_clk;
1315  }
1316  clk_enable(atdma->clk);
1317 
1318  /* force dma off, just in case */
1319  at_dma_off(atdma);
1320 
1321  err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1322  if (err)
1323  goto err_irq;
1324 
1325  platform_set_drvdata(pdev, atdma);
1326 
1327  /* create a pool of consistent memory blocks for hardware descriptors */
1328  atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1329  &pdev->dev, sizeof(struct at_desc),
1330  4 /* word alignment */, 0);
1331  if (!atdma->dma_desc_pool) {
1332  dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1333  err = -ENOMEM;
1334  goto err_pool_create;
1335  }
1336 
1337  /* clear any pending interrupt */
1338  while (dma_readl(atdma, EBCISR))
1339  cpu_relax();
1340 
1341  /* initialize channel-related values */
1342  INIT_LIST_HEAD(&atdma->dma_common.channels);
1343  for (i = 0; i < plat_dat->nr_channels; i++) {
1344  struct at_dma_chan *atchan = &atdma->chan[i];
1345 
1346  atchan->chan_common.device = &atdma->dma_common;
1347  dma_cookie_init(&atchan->chan_common);
1348  list_add_tail(&atchan->chan_common.device_node,
1349  &atdma->dma_common.channels);
1350 
1351  atchan->ch_regs = atdma->regs + ch_regs(i);
1352  spin_lock_init(&atchan->lock);
1353  atchan->mask = 1 << i;
1354 
1355  INIT_LIST_HEAD(&atchan->active_list);
1356  INIT_LIST_HEAD(&atchan->queue);
1357  INIT_LIST_HEAD(&atchan->free_list);
1358 
1359  tasklet_init(&atchan->tasklet, atc_tasklet,
1360  (unsigned long)atchan);
1361  atc_enable_chan_irq(atdma, i);
1362  }
1363 
1364  /* set base routines */
1365  atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1366  atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1367  atdma->dma_common.device_tx_status = atc_tx_status;
1368  atdma->dma_common.device_issue_pending = atc_issue_pending;
1369  atdma->dma_common.dev = &pdev->dev;
1370 
1371  /* set prep routines based on capability */
1372  if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1373  atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1374 
1375  if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1376  atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1377  /* controller can do slave DMA: can trigger cyclic transfers */
1378  dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1379  atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1380  atdma->dma_common.device_control = atc_control;
1381  }
1382 
1383  dma_writel(atdma, EN, AT_DMA_ENABLE);
1384 
1385  dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1386  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1387  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1388  plat_dat->nr_channels);
1389 
1389 
1390  dma_async_device_register(&atdma->dma_common);
1391 
1392  return 0;
1393 
1394 err_pool_create:
1395  platform_set_drvdata(pdev, NULL);
1396  free_irq(platform_get_irq(pdev, 0), atdma);
1397 err_irq:
1398  clk_disable(atdma->clk);
1399  clk_put(atdma->clk);
1400 err_clk:
1401  iounmap(atdma->regs);
1402  atdma->regs = NULL;
1403 err_release_r:
1404  release_mem_region(io->start, size);
1405 err_kfree:
1406  kfree(atdma);
1407  return err;
1408 }
1409 
1410 static int __exit at_dma_remove(struct platform_device *pdev)
1411 {
1412  struct at_dma *atdma = platform_get_drvdata(pdev);
1413  struct dma_chan *chan, *_chan;
1414  struct resource *io;
1415 
1416  at_dma_off(atdma);
1417  dma_async_device_unregister(&atdma->dma_common);
1418 
1419  dma_pool_destroy(atdma->dma_desc_pool);
1420  platform_set_drvdata(pdev, NULL);
1421  free_irq(platform_get_irq(pdev, 0), atdma);
1422 
1423  list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1424  device_node) {
1425  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1426 
1427  /* Disable interrupts */
1428  atc_disable_chan_irq(atdma, chan->chan_id);
1429  tasklet_disable(&atchan->tasklet);
1430 
1431  tasklet_kill(&atchan->tasklet);
1432  list_del(&chan->device_node);
1433  }
1434 
1435  clk_disable(atdma->clk);
1436  clk_put(atdma->clk);
1437 
1438  iounmap(atdma->regs);
1439  atdma->regs = NULL;
1440 
1441  io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1442  release_mem_region(io->start, resource_size(io));
1443 
1444  kfree(atdma);
1445 
1446  return 0;
1447 }
1448 
1449 static void at_dma_shutdown(struct platform_device *pdev)
1450 {
1451  struct at_dma *atdma = platform_get_drvdata(pdev);
1452 
1453  at_dma_off(platform_get_drvdata(pdev));
1454  clk_disable(atdma->clk);
1455 }
1456 
1457 static int at_dma_prepare(struct device *dev)
1458 {
1459  struct platform_device *pdev = to_platform_device(dev);
1460  struct at_dma *atdma = platform_get_drvdata(pdev);
1461  struct dma_chan *chan, *_chan;
1462 
1463  list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1464  device_node) {
1465  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1466  /* wait for transaction completion (except in cyclic case) */
1467  if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
1468  return -EAGAIN;
1469  }
1470  return 0;
1471 }
1472 
1473 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
1474 {
1475  struct dma_chan *chan = &atchan->chan_common;
1476 
1477  /* The channel should already have been paused by its user;
1478  * do it here anyway if that has not been done */
1479  if (!atc_chan_is_paused(atchan)) {
1480  dev_warn(chan2dev(chan),
1481  "cyclic channel not paused, should be done by channel user\n");
1482  atc_control(chan, DMA_PAUSE, 0);
1483  }
1484 
1485  /* now preserve additional data for cyclic operations */
1486  /* next descriptor address in the cyclic list */
1487  atchan->save_dscr = channel_readl(atchan, DSCR);
1488 
1489  vdbg_dump_regs(atchan);
1490 }
1491 
1492 static int at_dma_suspend_noirq(struct device *dev)
1493 {
1494  struct platform_device *pdev = to_platform_device(dev);
1495  struct at_dma *atdma = platform_get_drvdata(pdev);
1496  struct dma_chan *chan, *_chan;
1497 
1498  /* preserve data */
1499  list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1500  device_node) {
1501  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1502 
1503  if (atc_chan_is_cyclic(atchan))
1504  atc_suspend_cyclic(atchan);
1505  atchan->save_cfg = channel_readl(atchan, CFG);
1506  }
1507  atdma->save_imr = dma_readl(atdma, EBCIMR);
1508 
1509  /* disable DMA controller */
1510  at_dma_off(atdma);
1511  clk_disable(atdma->clk);
1512  return 0;
1513 }
1514 
1515 static void atc_resume_cyclic(struct at_dma_chan *atchan)
1516 {
1517  struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
1518 
1519  /* restore channel status for cyclic descriptors list:
1520  * next descriptor in the cyclic list at the time of suspend */
1521  channel_writel(atchan, SADDR, 0);
1522  channel_writel(atchan, DADDR, 0);
1523  channel_writel(atchan, CTRLA, 0);
1524  channel_writel(atchan, CTRLB, 0);
1525  channel_writel(atchan, DSCR, atchan->save_dscr);
1526  dma_writel(atdma, CHER, atchan->mask);
1527 
1528  /* the channel pause state must be cleared by the channel user;
1529  * we cannot take the initiative to do it here */
1530 
1531  vdbg_dump_regs(atchan);
1532 }
1533 
1534 static int at_dma_resume_noirq(struct device *dev)
1535 {
1536  struct platform_device *pdev = to_platform_device(dev);
1537  struct at_dma *atdma = platform_get_drvdata(pdev);
1538  struct dma_chan *chan, *_chan;
1539 
1540  /* bring back DMA controller */
1541  clk_enable(atdma->clk);
1542  dma_writel(atdma, EN, AT_DMA_ENABLE);
1543 
1544  /* clear any pending interrupt */
1545  while (dma_readl(atdma, EBCISR))
1546  cpu_relax();
1547 
1548  /* restore saved data */
1549  dma_writel(atdma, EBCIER, atdma->save_imr);
1550  list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1551  device_node) {
1552  struct at_dma_chan *atchan = to_at_dma_chan(chan);
1553 
1554  channel_writel(atchan, CFG, atchan->save_cfg);
1555  if (atc_chan_is_cyclic(atchan))
1556  atc_resume_cyclic(atchan);
1557  }
1558  return 0;
1559 }
1560 
1561 static const struct dev_pm_ops at_dma_dev_pm_ops = {
1562  .prepare = at_dma_prepare,
1563  .suspend_noirq = at_dma_suspend_noirq,
1564  .resume_noirq = at_dma_resume_noirq,
1565 };
1566 
1567 static struct platform_driver at_dma_driver = {
1568  .remove = __exit_p(at_dma_remove),
1569  .shutdown = at_dma_shutdown,
1570  .id_table = atdma_devtypes,
1571  .driver = {
1572  .name = "at_hdmac",
1573  .pm = &at_dma_dev_pm_ops,
1574  .of_match_table = of_match_ptr(atmel_dma_dt_ids),
1575  },
1576 };
1577 
1578 static int __init at_dma_init(void)
1579 {
1580  return platform_driver_probe(&at_dma_driver, at_dma_probe);
1581 }
1582 subsys_initcall(at_dma_init);
1583 
1584 static void __exit at_dma_exit(void)
1585 {
1586  platform_driver_unregister(&at_dma_driver);
1587 }
1588 module_exit(at_dma_exit);
1589 
1590 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1591 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
1592 MODULE_LICENSE("GPL");
1593 MODULE_ALIAS("platform:at_hdmac");