/*
 * NOTE(review): the lines that stood here were Doxygen page chrome
 * ("Linux Kernel 3.7.1" site navigation for the source listing of
 * omap24xxcam-dma.c), captured along with the code during extraction;
 * preserved as a comment so the file remains plain C.
 */
1 /*
2  * drivers/media/platform/omap24xxcam-dma.c
3  *
4  * Copyright (C) 2004 MontaVista Software, Inc.
5  * Copyright (C) 2004 Texas Instruments.
6  * Copyright (C) 2007 Nokia Corporation.
7  *
8  * Contact: Sakari Ailus <[email protected]>
9  *
10  * Based on code from Andy Lowe <[email protected]> and
11  * David Cohen <[email protected]>.
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License
15  * version 2 as published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25  * 02110-1301 USA
26  */
27 
28 #include <linux/kernel.h>
29 #include <linux/io.h>
30 #include <linux/scatterlist.h>
31 
32 #include "omap24xxcam.h"
33 
34 /*
35  *
36  * DMA hardware.
37  *
38  */
39 
40 /* Ack all interrupt on CSR and IRQSTATUS_L0 */
41 static void omap24xxcam_dmahw_ack_all(void __iomem *base)
42 {
43  u32 csr;
44  int i;
45 
46  for (i = 0; i < NUM_CAMDMA_CHANNELS; ++i) {
47  csr = omap24xxcam_reg_in(base, CAMDMA_CSR(i));
48  /* ack interrupt in CSR */
49  omap24xxcam_reg_out(base, CAMDMA_CSR(i), csr);
50  }
51  omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, 0xf);
52 }
53 
54 /* Ack dmach on CSR and IRQSTATUS_L0 */
55 static u32 omap24xxcam_dmahw_ack_ch(void __iomem *base, int dmach)
56 {
57  u32 csr;
58 
59  csr = omap24xxcam_reg_in(base, CAMDMA_CSR(dmach));
60  /* ack interrupt in CSR */
61  omap24xxcam_reg_out(base, CAMDMA_CSR(dmach), csr);
62  /* ack interrupt in IRQSTATUS */
63  omap24xxcam_reg_out(base, CAMDMA_IRQSTATUS_L0, (1 << dmach));
64 
65  return csr;
66 }
67 
68 static int omap24xxcam_dmahw_running(void __iomem *base, int dmach)
69 {
70  return omap24xxcam_reg_in(base, CAMDMA_CCR(dmach)) & CAMDMA_CCR_ENABLE;
71 }
72 
/*
 * Program channel @dmach for one camera->memory transfer: destination
 * bus address @start (CDSA), element count @len (CEN), a single frame
 * (CFN = 1), source address 0 (CSSA — the camera FIFO port), source
 * frame index DMA_THRESHOLD (CSFI), and no channel linking yet
 * (CLNK_CTRL = 0).
 *
 * NOTE(review): the listing this file was recovered from dropped the
 * continuation lines carrying the CAMDMA_CCR, CAMDMA_CSDP, most of the
 * CAMDMA_CSR and the CAMDMA_CICR bitmask arguments (original lines
 * 77-84, 89-94, 102-105, 108-112); the truncated calls below must be
 * restored from the original drivers/media/platform/omap24xxcam-dma.c.
 */
static void omap24xxcam_dmahw_transfer_setup(void __iomem *base, int dmach,
		dma_addr_t start, u32 len)
{
	omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
	omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(dmach), 0);
	omap24xxcam_reg_out(base, CAMDMA_CEN(dmach), len);
	omap24xxcam_reg_out(base, CAMDMA_CFN(dmach), 1);
	omap24xxcam_reg_out(base, CAMDMA_CSDP(dmach),
	omap24xxcam_reg_out(base, CAMDMA_CSSA(dmach), 0);
	omap24xxcam_reg_out(base, CAMDMA_CDSA(dmach), start);
	omap24xxcam_reg_out(base, CAMDMA_CSEI(dmach), 0);
	omap24xxcam_reg_out(base, CAMDMA_CSFI(dmach), DMA_THRESHOLD);
	omap24xxcam_reg_out(base, CAMDMA_CDEI(dmach), 0);
	omap24xxcam_reg_out(base, CAMDMA_CDFI(dmach), 0);
	omap24xxcam_reg_out(base, CAMDMA_CSR(dmach),
			| CAMDMA_CSR_DROP);
	omap24xxcam_reg_out(base, CAMDMA_CICR(dmach),
}
114 
/*
 * Kick off a previously programmed transfer on channel @dmach by
 * writing its CCR with block/frame sync bits.
 *
 * NOTE(review): several CCR mask lines (original lines 118, 120-122,
 * 124) were lost in extraction — presumably including
 * CAMDMA_CCR_ENABLE, since both omap24xxcam_dmahw_running() and the
 * chain-race check test that bit; confirm against the original file.
 */
static void omap24xxcam_dmahw_transfer_start(void __iomem *base, int dmach)
{
	omap24xxcam_reg_out(base, CAMDMA_CCR(dmach),
			| CAMDMA_CCR_BS
			| CAMDMA_CCR_FS
}
126 
/*
 * Chain channel @dmach behind the previous channel (mod
 * NUM_CAMDMA_CHANNELS) by writing the previous channel's CLNK_CTRL,
 * then close the race where the previous transfer finished before the
 * link was written: starting from the oldest busy channel, scan for a
 * running channel; if we reach @dmach without finding one, the chain
 * was missed and the transfer is started by hand.
 *
 * @free_dmach: count of currently unused channels; (dmach + free_dmach)
 * mod NUM_CAMDMA_CHANNELS is the oldest busy channel.
 *
 * NOTE(review): the CLNK_CTRL value argument (original line 137) was
 * lost in extraction; the first reg_out below is truncated.
 */
static void omap24xxcam_dmahw_transfer_chain(void __iomem *base, int dmach,
		int free_dmach)
{
	int prev_dmach, ch;

	if (dmach == 0)
		prev_dmach = NUM_CAMDMA_CHANNELS - 1;
	else
		prev_dmach = dmach - 1;
	omap24xxcam_reg_out(base, CAMDMA_CLNK_CTRL(prev_dmach),
	/* Did we chain the DMA transfer before the previous one
	 * finished?
	 */
	ch = (dmach + free_dmach) % NUM_CAMDMA_CHANNELS;
	while (!(omap24xxcam_reg_in(base, CAMDMA_CCR(ch))
		 & CAMDMA_CCR_ENABLE)) {
		if (ch == dmach) {
			/* The previous transfer has ended and this one
			 * hasn't started, so we must not have chained
			 * to the previous one in time. We'll have to
			 * start it now.
			 */
			omap24xxcam_dmahw_transfer_start(base, dmach);
			break;
		} else
			ch = (ch + 1) % NUM_CAMDMA_CHANNELS;
	}
}
156 
/* Abort a single DMA channel: mask its interrupts (CICR = 0), clear its
 * link to the next channel, and clear its CCR ENABLE bit.
 *
 * (The comment that originally stood here described the higher-level
 * abort-all routine, omap24xxcam_dma_abort() — a copy-paste; this is
 * the per-channel helper it calls.)
 *
 * NOTE(review): the mask argument of the CLNK_CTRL reg_merge (original
 * line 169) was lost in extraction; that call is truncated below.
 */
static void omap24xxcam_dmahw_abort_ch(void __iomem *base, int dmach)
{
	/* mask all interrupts from this channel */
	omap24xxcam_reg_out(base, CAMDMA_CICR(dmach), 0);
	/* unlink this channel */
	omap24xxcam_reg_merge(base, CAMDMA_CLNK_CTRL(dmach), 0,
	/* disable this channel */
	omap24xxcam_reg_merge(base, CAMDMA_CCR(dmach), 0, CAMDMA_CCR_ENABLE);
}
173 
/*
 * One-time DMA controller setup: write OCP_SYSCONFIG, merge a value
 * into GCR, and unmask all four channel interrupts on IRQ line 0
 * (IRQENABLE_L0 = 0xf).
 *
 * NOTE(review): the OCP_SYSCONFIG value and the GCR merge mask
 * (original lines 177-179 and 182) were lost in extraction; the first
 * two calls below are truncated.
 */
static void omap24xxcam_dmahw_init(void __iomem *base)
{
	omap24xxcam_reg_out(base, CAMDMA_OCP_SYSCONFIG,

	omap24xxcam_reg_merge(base, CAMDMA_GCR, 0x10,

	omap24xxcam_reg_out(base, CAMDMA_IRQENABLE_L0, 0xf);
}
186 
187 /*
188  *
189  * Individual DMA channel handling.
190  *
191  */
192 
193 /* Start a DMA transfer from the camera to memory.
194  * Returns zero if the transfer was successfully started, or non-zero if all
195  * DMA channels are already in use or starting is currently inhibited.
196  */
197 static int omap24xxcam_dma_start(struct omap24xxcam_dma *dma, dma_addr_t start,
198  u32 len, dma_callback_t callback, void *arg)
199 {
200  unsigned long flags;
201  int dmach;
202 
203  spin_lock_irqsave(&dma->lock, flags);
204 
205  if (!dma->free_dmach || atomic_read(&dma->dma_stop)) {
206  spin_unlock_irqrestore(&dma->lock, flags);
207  return -EBUSY;
208  }
209 
210  dmach = dma->next_dmach;
211 
212  dma->ch_state[dmach].callback = callback;
213  dma->ch_state[dmach].arg = arg;
214 
215  omap24xxcam_dmahw_transfer_setup(dma->base, dmach, start, len);
216 
217  /* We're ready to start the DMA transfer. */
218 
219  if (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
220  /* A transfer is already in progress, so try to chain to it. */
221  omap24xxcam_dmahw_transfer_chain(dma->base, dmach,
222  dma->free_dmach);
223  } else {
224  /* No transfer is in progress, so we'll just start this one
225  * now.
226  */
227  omap24xxcam_dmahw_transfer_start(dma->base, dmach);
228  }
229 
230  dma->next_dmach = (dma->next_dmach + 1) % NUM_CAMDMA_CHANNELS;
231  dma->free_dmach--;
232 
233  spin_unlock_irqrestore(&dma->lock, flags);
234 
235  return 0;
236 }
237 
/* Abort all chained DMA transfers. After all transfers have been
 * aborted and the DMA controller is idle, the completion routines for
 * any aborted transfers will be called in sequence. The DMA
 * controller may not be idle after this routine completes, because
 * the completion routines might start new transfers.
 */
static void omap24xxcam_dma_abort(struct omap24xxcam_dma *dma, u32 csr)
{
	unsigned long flags;
	int dmach, i, free_dmach;
	/* NOTE(review): the declaration of the local 'callback'
	 * (dma_callback_t, original line 248) was lost in extraction.
	 */
	void *arg;

	spin_lock_irqsave(&dma->lock, flags);

	/* stop any DMA transfers in progress */
	dmach = (dma->next_dmach + dma->free_dmach) % NUM_CAMDMA_CHANNELS;
	for (i = 0; i < NUM_CAMDMA_CHANNELS; i++) {
		omap24xxcam_dmahw_abort_ch(dma->base, dmach);
		dmach = (dmach + 1) % NUM_CAMDMA_CHANNELS;
	}

	/* We have to be careful here because the callback routine
	 * might start a new DMA transfer, and we only want to abort
	 * transfers that were started before this routine was called.
	 * The local 'free_dmach' snapshot counts how many of the
	 * originally-busy channels have been processed; a callback that
	 * starts a new transfer decrements dma->free_dmach but not the
	 * local copy, so the loop still terminates after the original set.
	 */
	free_dmach = dma->free_dmach;
	while ((dma->free_dmach < NUM_CAMDMA_CHANNELS) &&
	       (free_dmach < NUM_CAMDMA_CHANNELS)) {
		dmach = (dma->next_dmach + dma->free_dmach)
			% NUM_CAMDMA_CHANNELS;
		callback = dma->ch_state[dmach].callback;
		arg = dma->ch_state[dmach].arg;
		dma->free_dmach++;
		free_dmach++;
		if (callback) {
			/* leave interrupts disabled during callback */
			spin_unlock(&dma->lock);
			(*callback) (dma, csr, arg);
			spin_lock(&dma->lock);
		}
	}

	spin_unlock_irqrestore(&dma->lock, flags);
}
283 
284 /* Abort all chained DMA transfers. After all transfers have been
285  * aborted and the DMA controller is idle, the completion routines for
286  * any aborted transfers will be called in sequence. If the completion
287  * routines attempt to start a new DMA transfer it will fail, so the
288  * DMA controller will be idle after this routine completes.
289  */
static void omap24xxcam_dma_stop(struct omap24xxcam_dma *dma, u32 csr)
{
	/* While dma_stop is raised, omap24xxcam_dma_start() returns
	 * -EBUSY, so completion callbacks invoked from the abort cannot
	 * queue new transfers — guaranteeing the controller is idle when
	 * this returns (unlike plain omap24xxcam_dma_abort()).
	 */
	atomic_inc(&dma->dma_stop);
	omap24xxcam_dma_abort(dma, csr);
	atomic_dec(&dma->dma_stop);
}
296 
/* Camera DMA interrupt service routine. */
/* NOTE(review): lost in extraction: the definition line (original line
 * 298, presumably 'void omap24xxcam_dma_isr(struct omap24xxcam_dma
 * *dma)'), the declaration of the local 'callback' (line 301), the
 * remaining csr_error bits (lines 305-306), and the first line of the
 * trailing sg-layer completion call (line 349) whose container_of
 * argument survives below. Restore from the original file.
 */
{
	int dmach;
	void *arg;
	u32 csr;
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR

	spin_lock(&dma->lock);

	if (dma->free_dmach == NUM_CAMDMA_CHANNELS) {
		/* A camera DMA interrupt occurred while all channels
		 * are idle, so we'll acknowledge the interrupt in the
		 * IRQSTATUS register and exit.
		 */
		omap24xxcam_dmahw_ack_all(dma->base);
		spin_unlock(&dma->lock);
		return;
	}

	/* Retire finished channels oldest-first; stop at the first one
	 * still running. On any error CSR bit, stop everything.
	 */
	while (dma->free_dmach < NUM_CAMDMA_CHANNELS) {
		dmach = (dma->next_dmach + dma->free_dmach)
			% NUM_CAMDMA_CHANNELS;
		if (omap24xxcam_dmahw_running(dma->base, dmach)) {
			/* This buffer hasn't finished yet, so we're done. */
			break;
		}
		csr = omap24xxcam_dmahw_ack_ch(dma->base, dmach);
		if (csr & csr_error) {
			/* A DMA error occurred, so stop all DMA
			 * transfers in progress.
			 */
			spin_unlock(&dma->lock);
			omap24xxcam_dma_stop(dma, csr);
			return;
		} else {
			callback = dma->ch_state[dmach].callback;
			arg = dma->ch_state[dmach].arg;
			dma->free_dmach++;
			if (callback) {
				spin_unlock(&dma->lock);
				(*callback) (dma, csr, arg);
				spin_lock(&dma->lock);
			}
		}
	}

	spin_unlock(&dma->lock);

			container_of(dma, struct omap24xxcam_sgdma, dma));
}
352 
/* NOTE(review): the definition line (original line 353, presumably
 * 'void omap24xxcam_dma_hwinit(struct omap24xxcam_dma *dma)') was lost
 * in extraction. This wrapper takes the channel lock and reprograms
 * the DMA controller via omap24xxcam_dmahw_init().
 */
{
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);

	omap24xxcam_dmahw_init(dma->base);

	spin_unlock_irqrestore(&dma->lock, flags);
}
363 
/*
 * Software-state initialization for the low-level DMA layer: the lock,
 * the register base, the round-robin channel cursor and the per-channel
 * callback slots.
 *
 * NOTE(review): original line 372 was lost in extraction — by symmetry
 * with omap24xxcam_sgdma_init() it presumably set
 * 'dma->free_dmach = NUM_CAMDMA_CHANNELS;' — confirm against the
 * original file.
 */
static void omap24xxcam_dma_init(struct omap24xxcam_dma *dma,
		void __iomem *base)
{
	int ch;

	/* group all channels on DMA IRQ0 and unmask irq */
	spin_lock_init(&dma->lock);
	dma->base = base;
	dma->next_dmach = 0;
	for (ch = 0; ch < NUM_CAMDMA_CHANNELS; ch++) {
		dma->ch_state[ch].callback = NULL;
		dma->ch_state[ch].arg = NULL;
	}
}
379 
380 /*
381  *
382  * Scatter-gather DMA.
383  *
384  * High-level DMA construct for transferring whole picture frames to
385  * memory that is discontinuous.
386  *
387  */
388 
/* DMA completion routine for the scatter-gather DMA fragments.
 * Accumulates each fragment's CSR into the slot's status; when the last
 * outstanding fragment of a slot finishes and either the whole sglist
 * has been submitted or an error was recorded, the slot is freed and
 * the user's sgdma callback is invoked (without the lock held).
 */
static void omap24xxcam_sgdma_callback(struct omap24xxcam_dma *dma, u32 csr,
		void *arg)
{
	struct omap24xxcam_sgdma *sgdma =
		container_of(dma, struct omap24xxcam_sgdma, dma);
	int sgslot = (int)arg;	/* slot index smuggled through the void* */
	struct sgdma_state *sg_state;
	/* NOTE(review): the remaining error bits OR-ed into csr_error
	 * (original lines 398-399) were lost in extraction.
	 */
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR

	spin_lock(&sgdma->lock);

	/* We got an interrupt, we can remove the timer */
	del_timer(&sgdma->reset_timer);

	sg_state = sgdma->sg_state + sgslot;
	if (!sg_state->queued_sglist) {
		spin_unlock(&sgdma->lock);
		printk(KERN_ERR "%s: sgdma completed when none queued!\n",
		       __func__);
		return;
	}

	sg_state->csr |= csr;
	if (!--sg_state->queued_sglist) {
		/* Queue for this sglist is empty, so check to see if we're
		 * done.
		 */
		if ((sg_state->next_sglist == sg_state->sglen)
		    || (sg_state->csr & csr_error)) {
			sgdma_callback_t callback = sg_state->callback;
			void *arg = sg_state->arg;
			u32 sg_csr = sg_state->csr;
			/* All done with this sglist */
			sgdma->free_sgdma++;
			if (callback) {
				spin_unlock(&sgdma->lock);
				(*callback) (sgdma, sg_csr, arg);
				return;
			}
		}
	}

	spin_unlock(&sgdma->lock);
}
436 
/* Start queued scatter-gather DMA transfers. */
/* NOTE(review): lost in extraction: the definition line (original line
 * 438, presumably 'static void omap24xxcam_sgdma_process(struct
 * omap24xxcam_sgdma *sgdma)') and the remaining csr_error bits (lines
 * 444-445). Restore from the original file.
 */
{
	unsigned long flags;
	int queued_sgdma, sgslot;
	struct sgdma_state *sg_state;
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR

	spin_lock_irqsave(&sgdma->lock, flags);

	/* Walk every busy sg slot, oldest first, feeding fragments to the
	 * channel layer until it reports -EBUSY.
	 */
	queued_sgdma = NUM_SG_DMA - sgdma->free_sgdma;
	sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
	while (queued_sgdma > 0) {
		sg_state = sgdma->sg_state + sgslot;
		while ((sg_state->next_sglist < sg_state->sglen) &&
		       !(sg_state->csr & csr_error)) {
			const struct scatterlist *sglist;
			unsigned int len;

			sglist = sg_state->sglist + sg_state->next_sglist;
			/* try to start the next DMA transfer */
			if (sg_state->next_sglist + 1 == sg_state->sglen) {
				/*
				 * On the last sg, we handle the case where
				 * cam->img.pix.sizeimage % PAGE_ALIGN != 0
				 */
				len = sg_state->len - sg_state->bytes_read;
			} else {
				len = sg_dma_len(sglist);
			}

			if (omap24xxcam_dma_start(&sgdma->dma,
						  sg_dma_address(sglist),
						  len,
						  omap24xxcam_sgdma_callback,
						  (void *)sgslot)) {
				/* DMA start failed */
				spin_unlock_irqrestore(&sgdma->lock, flags);
				return;
			} else {
				unsigned long expires;
				/* DMA start was successful */
				sg_state->next_sglist++;
				sg_state->bytes_read += len;
				sg_state->queued_sglist++;

				/* We start the reset timer */
				expires = jiffies + HZ;
				mod_timer(&sgdma->reset_timer, expires);
			}
		}
		queued_sgdma--;
		sgslot = (sgslot + 1) % NUM_SG_DMA;
	}

	spin_unlock_irqrestore(&sgdma->lock, flags);
}
496 
497 /*
498  * Queue a scatter-gather DMA transfer from the camera to memory.
499  * Returns zero if the transfer was successfully queued, or non-zero
500  * if all of the scatter-gather slots are already in use.
501  */
/* NOTE(review): lost in extraction: the first line of the definition
 * (original line 502, presumably 'static int omap24xxcam_sgdma_queue(
 * struct omap24xxcam_sgdma *sgdma,') and original line 536 —
 * presumably the call that starts processing the queue,
 * 'omap24xxcam_sgdma_process(sgdma);'. Restore from the original file.
 */
		const struct scatterlist *sglist, int sglen,
		int len, sgdma_callback_t callback, void *arg)
{
	unsigned long flags;
	struct sgdma_state *sg_state;

	/* Reject a negative entry count, or a positive count with no list. */
	if ((sglen < 0) || ((sglen > 0) && !sglist))
		return -EINVAL;

	spin_lock_irqsave(&sgdma->lock, flags);

	if (!sgdma->free_sgdma) {
		spin_unlock_irqrestore(&sgdma->lock, flags);
		return -EBUSY;
	}

	/* Claim the next free slot and record this transaction's state. */
	sg_state = sgdma->sg_state + sgdma->next_sgdma;

	sg_state->sglist = sglist;
	sg_state->sglen = sglen;
	sg_state->next_sglist = 0;
	sg_state->bytes_read = 0;
	sg_state->len = len;
	sg_state->queued_sglist = 0;
	sg_state->csr = 0;
	sg_state->callback = callback;
	sg_state->arg = arg;

	sgdma->next_sgdma = (sgdma->next_sgdma + 1) % NUM_SG_DMA;
	sgdma->free_sgdma--;

	spin_unlock_irqrestore(&sgdma->lock, flags);


	return 0;
}
540 
541 /* Sync scatter-gather DMA by aborting any DMA transfers currently in progress.
542  * Any queued scatter-gather DMA transactions that have not yet been started
543  * will remain queued. The DMA controller will be idle after this routine
544  * completes. When the scatter-gather queue is restarted, the next
545  * scatter-gather DMA transfer will begin at the start of a new transaction.
546  */
/* NOTE(review): lost in extraction: the definition line (original line
 * 547, presumably 'void omap24xxcam_sgdma_sync(struct omap24xxcam_sgdma
 * *sgdma)') and the declaration/initialization of the local 'csr'
 * (line 552). Restore from the original file.
 */
{
	unsigned long flags;
	int sgslot;
	struct sgdma_state *sg_state;

	/* stop any DMA transfers in progress */
	omap24xxcam_dma_stop(&sgdma->dma, csr);

	spin_lock_irqsave(&sgdma->lock, flags);

	/* Only the oldest busy slot can have been partially submitted
	 * (next_sglist != 0); abort it and report via its callback. Fully
	 * queued-but-unstarted slots stay queued, per the contract above.
	 */
	if (sgdma->free_sgdma < NUM_SG_DMA) {
		sgslot = (sgdma->next_sgdma + sgdma->free_sgdma) % NUM_SG_DMA;
		sg_state = sgdma->sg_state + sgslot;
		if (sg_state->next_sglist != 0) {
			/* This DMA transfer was in progress, so abort it. */
			sgdma_callback_t callback = sg_state->callback;
			void *arg = sg_state->arg;
			sgdma->free_sgdma++;
			if (callback) {
				/* leave interrupts masked */
				spin_unlock(&sgdma->lock);
				(*callback) (sgdma, csr, arg);
				spin_lock(&sgdma->lock);
			}
		}
	}

	spin_unlock_irqrestore(&sgdma->lock, flags);
}
578 
/* NOTE(review): the first line of the definition (original line 579,
 * presumably 'void omap24xxcam_sgdma_init(struct omap24xxcam_sgdma
 * *sgdma,') was lost in extraction.
 *
 * Initializes the scatter-gather layer: the lock, free-slot
 * bookkeeping, per-slot state, the underlying channel layer
 * (omap24xxcam_dma_init), and the watchdog reset timer that the
 * process loop re-arms (jiffies + HZ) on every DMA start.
 */
		void __iomem *base,
		void (*reset_callback)(unsigned long data),
		unsigned long reset_callback_data)
{
	int sg;

	spin_lock_init(&sgdma->lock);
	sgdma->free_sgdma = NUM_SG_DMA;
	sgdma->next_sgdma = 0;
	for (sg = 0; sg < NUM_SG_DMA; sg++) {
		sgdma->sg_state[sg].sglen = 0;
		sgdma->sg_state[sg].next_sglist = 0;
		sgdma->sg_state[sg].bytes_read = 0;
		sgdma->sg_state[sg].queued_sglist = 0;
		sgdma->sg_state[sg].csr = 0;
		sgdma->sg_state[sg].callback = NULL;
		sgdma->sg_state[sg].arg = NULL;
	}

	omap24xxcam_dma_init(&sgdma->dma, base);
	setup_timer(&sgdma->reset_timer, reset_callback, reset_callback_data);
}