Linux Kernel 3.7.1
dma.c
/* dma.c: DMA controller management on FR401 and the like
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/dma.h>
#include <asm/gpio-regs.h>
#include <asm/irc-regs.h>
#include <asm/cpu-irqs.h>

struct frv_dma_channel {
	uint8_t flags;			/* flags describing channel's state */
#define FRV_DMA_FLAGS_RESERVED 0x01
#define FRV_DMA_FLAGS_INUSE 0x02
#define FRV_DMA_FLAGS_PAUSED 0x04
	uint8_t cap;			/* capabilities available */
	int irq;			/* completion IRQ */
	uint32_t dreqbit;		/* SIR bit that routes DREQ to this channel */
	uint32_t dackbit;		/* SOR bit that drives this channel's DACK pin */
	uint32_t donebit;		/* SOR bit that drives this channel's DONE pin */
	const unsigned long ioaddr;	/* DMA controller regs addr */
	const char *devname;
	dma_irq_handler_t handler;	/* completion callback */
	void *data;
};


#define __get_DMAC(IO,X) ({ *(volatile unsigned long *)((IO) + DMAC_##X##x); })

#define __set_DMAC(IO,X,V) \
do { \
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
	mb(); \
} while(0)

#define ___set_DMAC(IO,X,V) \
do { \
	*(volatile unsigned long *)((IO) + DMAC_##X##x) = (V); \
} while(0)


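/* Illustrative sketch only (not part of the original file): an open-coded
 * equivalent of __set_DMAC(ioaddr, CCTR, v), showing the volatile MMIO store
 * plus memory barrier that the ordered-write macro performs.  DMAC_CCTRx is
 * the per-channel CCTR register offset that the token-pasting resolves to.
 */
static inline void example_set_cctr(unsigned long ioaddr, unsigned long v)
{
	*(volatile unsigned long *)(ioaddr + DMAC_CCTRx) = v;	/* write the channel control register */
	mb();							/* make the write visible before continuing */
}
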
static struct frv_dma_channel frv_dma_channels[FRV_DMA_NCHANS] = {
	[0] = {
		.cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq = IRQ_CPU_DMA0,
		.dreqbit = SIR_DREQ0_INPUT,
		.dackbit = SOR_DACK0_OUTPUT,
		.donebit = SOR_DONE0_OUTPUT,
		.ioaddr = 0xfe000900,
	},
	[1] = {
		.cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK | FRV_DMA_CAP_DONE,
		.irq = IRQ_CPU_DMA1,
		.dreqbit = SIR_DREQ1_INPUT,
		.dackbit = SOR_DACK1_OUTPUT,
		.donebit = SOR_DONE1_OUTPUT,
		.ioaddr = 0xfe000980,
	},
	[2] = {
		.cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq = IRQ_CPU_DMA2,
		.dreqbit = SIR_DREQ2_INPUT,
		.dackbit = SOR_DACK2_OUTPUT,
		.ioaddr = 0xfe000a00,
	},
	[3] = {
		.cap = FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
		.irq = IRQ_CPU_DMA3,
		.dreqbit = SIR_DREQ3_INPUT,
		.dackbit = SOR_DACK3_OUTPUT,
		.ioaddr = 0xfe000a80,
	},
	[4] = {
		.cap = FRV_DMA_CAP_DREQ,
		.irq = IRQ_CPU_DMA4,
		.dreqbit = SIR_DREQ4_INPUT,
		.ioaddr = 0xfe001000,
	},
	[5] = {
		.cap = FRV_DMA_CAP_DREQ,
		.irq = IRQ_CPU_DMA5,
		.dreqbit = SIR_DREQ5_INPUT,
		.ioaddr = 0xfe001080,
	},
	[6] = {
		.cap = FRV_DMA_CAP_DREQ,
		.irq = IRQ_CPU_DMA6,
		.dreqbit = SIR_DREQ6_INPUT,
		.ioaddr = 0xfe001100,
	},
	[7] = {
		.cap = FRV_DMA_CAP_DREQ,
		.irq = IRQ_CPU_DMA7,
		.dreqbit = SIR_DREQ7_INPUT,
		.ioaddr = 0xfe001180,
	},
};

static DEFINE_RWLOCK(frv_dma_channels_lock);

unsigned long frv_dma_inprogress;

#define frv_clear_dma_inprogress(channel) \
	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);

#define frv_set_dma_inprogress(channel) \
	atomic_set_mask(1 << (channel), &frv_dma_inprogress);

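/* Illustrative sketch only (not part of the original file): frv_dma_inprogress
 * is a bitmap with one bit per channel, set by frv_set_dma_inprogress() when a
 * transfer is kicked off and cleared on completion or stop.  A hypothetical
 * helper to query it could look like this.
 */
static inline int example_dma_is_running(int dma)
{
	return (frv_dma_inprogress >> dma) & 1;	/* non-zero while channel 'dma' is active */
}
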
/*****************************************************************************/
/*
 * DMA irq handler - determine channel involved, grab status and call real handler
 */
static irqreturn_t dma_irq_handler(int irq, void *_channel)
{
	struct frv_dma_channel *channel = _channel;

	frv_clear_dma_inprogress(channel - frv_dma_channels);
	return channel->handler(channel - frv_dma_channels,
				__get_DMAC(channel->ioaddr, CSTR),
				channel->data);

} /* end dma_irq_handler() */
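
/* Illustrative sketch only (not part of the original file): the shape of the
 * per-channel completion callback invoked by dma_irq_handler() above.  The
 * three arguments are exactly what dma_irq_handler() forwards: the channel
 * index, a snapshot of CSTR, and the cookie the driver handed to
 * frv_dma_open().  "example_dma_done" is a hypothetical driver function.
 */
static irqreturn_t example_dma_done(int dma, unsigned long cstr, void *data)
{
	if (cstr & DMAC_CSTRx_INT)	/* controller latched its completion interrupt */
		printk("example: DMA%d done, cstr=%lx\n", dma, cstr);
	return IRQ_HANDLED;
}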

/*****************************************************************************/
/*
 * Determine which DMA controllers are present on this CPU
 */
void __init frv_dma_init(void)
{
	unsigned long psr = __get_PSR();
	int num_dma, i;

	/* First, determine how many DMA channels are available */
	switch (PSR_IMPLE(psr)) {
	case PSR_IMPLE_FR405:
	case PSR_IMPLE_FR451:
	case PSR_IMPLE_FR501:
	case PSR_IMPLE_FR551:
		num_dma = FRV_DMA_8CHANS;
		break;

	case PSR_IMPLE_FR401:
	default:
		num_dma = FRV_DMA_4CHANS;
		break;
	}

	/* Now mark all of the non-existent channels as reserved */
	for(i = num_dma; i < FRV_DMA_NCHANS; i++)
		frv_dma_channels[i].flags = FRV_DMA_FLAGS_RESERVED;

} /* end frv_dma_init() */

/*****************************************************************************/
/*
 * allocate a DMA controller channel and the IRQ associated with it
 */
int frv_dma_open(const char *devname,
		 unsigned long dmamask,
		 int dmacap,
		 dma_irq_handler_t handler,
		 unsigned long irq_flags,
		 void *data)
{
	struct frv_dma_channel *channel;
	int dma, ret;
	uint32_t val;

	write_lock(&frv_dma_channels_lock);

	ret = -ENOSPC;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!test_bit(dma, &dmamask))
			continue;

		if ((channel->cap & dmacap) != dmacap)
			continue;

		if (!frv_dma_channels[dma].flags)
			goto found;
	}

	goto out;

 found:
	ret = request_irq(channel->irq, dma_irq_handler, irq_flags, devname, channel);
	if (ret < 0)
		goto out;

	/* okay, we've allocated all the resources */
	channel = &frv_dma_channels[dma];

	channel->flags |= FRV_DMA_FLAGS_INUSE;
	channel->devname = devname;
	channel->handler = handler;
	channel->data = data;

	/* Now make sure we are set up for DMA and not GPIO */
	/* SIR bit must be set for DMA to work */
	__set_SIR(channel->dreqbit | __get_SIR());
	/* SOR bits depend on what the caller requests */
	val = __get_SOR();
	if(dmacap & FRV_DMA_CAP_DACK)
		val |= channel->dackbit;
	else
		val &= ~channel->dackbit;
	if(dmacap & FRV_DMA_CAP_DONE)
		val |= channel->donebit;
	else
		val &= ~channel->donebit;
	__set_SOR(val);

	ret = dma;
 out:
	write_unlock(&frv_dma_channels_lock);
	return ret;
} /* end frv_dma_open() */

EXPORT_SYMBOL(frv_dma_open);

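/* Illustrative sketch only (not part of the original file): how a driver
 * might claim one of channels 0-3 with DREQ+DACK handshaking and release it
 * again.  example_dma_done() is the hypothetical handler sketched after
 * dma_irq_handler() above; the name, channel mask, capability bits and IRQ
 * flags are placeholders chosen for the example.
 */
static int example_claim_channel(void)
{
	int dma;

	dma = frv_dma_open("example", 0x0f,	/* accept any of channels 0-3 */
			   FRV_DMA_CAP_DREQ | FRV_DMA_CAP_DACK,
			   example_dma_done, 0, NULL);
	if (dma < 0)
		return dma;			/* -ENOSPC if no suitable channel is free */

	/* ... configure, start and wait for the transfer here ... */

	frv_dma_close(dma);			/* frees the IRQ and stops the channel */
	return 0;
}
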
/*****************************************************************************/
/*
 * close a DMA channel and its associated interrupt
 */
void frv_dma_close(int dma)
{
	struct frv_dma_channel *channel = &frv_dma_channels[dma];
	unsigned long flags;

	write_lock_irqsave(&frv_dma_channels_lock, flags);

	free_irq(channel->irq, channel);
	frv_dma_stop(dma);

	channel->flags &= ~FRV_DMA_FLAGS_INUSE;

	write_unlock_irqrestore(&frv_dma_channels_lock, flags);
} /* end frv_dma_close() */

EXPORT_SYMBOL(frv_dma_close);

/*****************************************************************************/
/*
 * set static configuration on a DMA channel
 */
void frv_dma_config(int dma, unsigned long ccfr, unsigned long cctr, unsigned long apr)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, CCFR, ccfr);
	___set_DMAC(ioaddr, CCTR, cctr);
	___set_DMAC(ioaddr, APR, apr);
	mb();

} /* end frv_dma_config() */

EXPORT_SYMBOL(frv_dma_config);

/*****************************************************************************/
/*
 * start a DMA channel
 */
void frv_dma_start(int dma,
		   unsigned long sba, unsigned long dba,
		   unsigned long pix, unsigned long six, unsigned long bcl)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SBA, sba);
	___set_DMAC(ioaddr, DBA, dba);
	___set_DMAC(ioaddr, PIX, pix);
	___set_DMAC(ioaddr, SIX, six);
	___set_DMAC(ioaddr, BCL, bcl);
	___set_DMAC(ioaddr, CSTR, 0);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_start() */

EXPORT_SYMBOL(frv_dma_start);

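/* Illustrative sketch only (not part of the original file): a typical call
 * sequence - static configuration with frv_dma_config(), then per-transfer
 * parameters and kick-off with frv_dma_start().  The zero ccfr/cctr/apr
 * values and index-register values are placeholders; the real bit layouts
 * come from <asm/dma.h> and the CPU manual.
 */
static void example_run_transfer(int dma, unsigned long src, unsigned long dst,
				 unsigned long count)
{
	/* request source, transfer sizes and addressing mode (placeholders) */
	frv_dma_config(dma, 0, 0, 0);

	/* source/destination bus addresses, index registers and BCL value;
	 * frv_dma_start() clears CSTR and sets DMAC_CCTRx_ACT to go */
	frv_dma_start(dma, src, dst, 0, 0, count);
}
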
/*****************************************************************************/
/*
 * restart a DMA channel that's been stopped in circular addressing mode by comparison-end
 */
void frv_dma_restart_circular(int dma, unsigned long six)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	___set_DMAC(ioaddr, SIX, six);
	___set_DMAC(ioaddr, CSTR, __get_DMAC(ioaddr, CSTR) & ~DMAC_CSTRx_CE);
	mb();

	__set_DMAC(ioaddr, CCTR, __get_DMAC(ioaddr, CCTR) | DMAC_CCTRx_ACT);
	frv_set_dma_inprogress(dma);

} /* end frv_dma_restart_circular() */

EXPORT_SYMBOL(frv_dma_restart_circular);

/*****************************************************************************/
/*
 * stop a DMA channel
 */
void frv_dma_stop(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;

	___set_DMAC(ioaddr, CSTR, 0);
	cctr = __get_DMAC(ioaddr, CCTR);
	cctr &= ~(DMAC_CCTRx_IE | DMAC_CCTRx_ACT);
	cctr |= DMAC_CCTRx_FC;			/* fifo clear */
	__set_DMAC(ioaddr, CCTR, cctr);
	__set_DMAC(ioaddr, BCL, 0);
	frv_clear_dma_inprogress(dma);
} /* end frv_dma_stop() */

EXPORT_SYMBOL(frv_dma_stop);

/*****************************************************************************/
/*
 * test interrupt status of DMA channel
 */
int is_frv_dma_interrupting(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;

	return __get_DMAC(ioaddr, CSTR) & (1 << 23);

} /* end is_frv_dma_interrupting() */

EXPORT_SYMBOL(is_frv_dma_interrupting);

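/* Illustrative sketch only (not part of the original file): busy-polling the
 * CSTR interrupt flag via is_frv_dma_interrupting() and then halting the
 * channel, assuming the frv_dma_* prototypes from <asm/dma.h> are in scope.
 * Real drivers would normally rely on the completion IRQ rather than polling;
 * "example_poll_and_stop" is a hypothetical helper.
 */
static void example_poll_and_stop(int dma)
{
	while (!is_frv_dma_interrupting(dma))
		cpu_relax();			/* spin until bit 23 of CSTR is set */

	frv_dma_stop(dma);			/* clear ACT, flush the FIFO, zero BCL */
	frv_dma_status_clear(dma);		/* wipe the latched status in CSTR */
}
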
/*****************************************************************************/
/*
 * dump data about a DMA channel
 */
void frv_dma_dump(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	unsigned long cstr, pix, six, bcl;

	cstr = __get_DMAC(ioaddr, CSTR);
	pix = __get_DMAC(ioaddr, PIX);
	six = __get_DMAC(ioaddr, SIX);
	bcl = __get_DMAC(ioaddr, BCL);

	printk("DMA[%d] cstr=%lx pix=%lx six=%lx bcl=%lx\n", dma, cstr, pix, six, bcl);

} /* end frv_dma_dump() */

EXPORT_SYMBOL(frv_dma_dump);

/*****************************************************************************/
/*
 * pause all DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_pause_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	write_lock(&frv_dma_channels_lock);

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_INUSE))
			continue;

		ioaddr = channel->ioaddr;
		cctr = __get_DMAC(ioaddr, CCTR);
		if (cctr & DMAC_CCTRx_ACT) {
			cctr &= ~DMAC_CCTRx_ACT;
			__set_DMAC(ioaddr, CCTR, cctr);

			do {
				cstr = __get_DMAC(ioaddr, CSTR);
			} while (cstr & DMAC_CSTRx_BUSY);

			if (cstr & DMAC_CSTRx_FED)
				channel->flags |= FRV_DMA_FLAGS_PAUSED;
			frv_clear_dma_inprogress(dma);
		}
	}

} /* end frv_dma_pause_all() */

EXPORT_SYMBOL(frv_dma_pause_all);

/*****************************************************************************/
/*
 * resume paused DMA controllers
 * - called by clock mangling routines
 * - caller must be holding interrupts disabled
 */
void frv_dma_resume_all(void)
{
	struct frv_dma_channel *channel;
	unsigned long ioaddr;
	unsigned long cstr, cctr;
	int dma;

	for (dma = FRV_DMA_NCHANS - 1; dma >= 0; dma--) {
		channel = &frv_dma_channels[dma];

		if (!(channel->flags & FRV_DMA_FLAGS_PAUSED))
			continue;

		ioaddr = channel->ioaddr;
		cstr = __get_DMAC(ioaddr, CSTR);
		cstr &= ~(DMAC_CSTRx_FED | DMAC_CSTRx_INT);
		__set_DMAC(ioaddr, CSTR, cstr);

		cctr = __get_DMAC(ioaddr, CCTR);
		cctr |= DMAC_CCTRx_ACT;
		__set_DMAC(ioaddr, CCTR, cctr);

		channel->flags &= ~FRV_DMA_FLAGS_PAUSED;
		frv_set_dma_inprogress(dma);
	}

	write_unlock(&frv_dma_channels_lock);

} /* end frv_dma_resume_all() */

EXPORT_SYMBOL(frv_dma_resume_all);

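/* Illustrative sketch only (not part of the original file): the pause/resume
 * pair is meant to bracket a clock-speed change with interrupts already
 * disabled, roughly as below.  Note that frv_dma_pause_all() takes
 * frv_dma_channels_lock and frv_dma_resume_all() releases it, so the two must
 * always be called as a pair.  "example_reclock" is hypothetical.
 */
static void example_reclock(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* callers must hold interrupts disabled */
	frv_dma_pause_all();		/* drain and halt every active channel */

	/* ... reprogram the clocks here ... */

	frv_dma_resume_all();		/* restart the channels paused above */
	local_irq_restore(flags);
}
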
/*****************************************************************************/
/*
 * dma status clear
 */
void frv_dma_status_clear(int dma)
{
	unsigned long ioaddr = frv_dma_channels[dma].ioaddr;
	uint32_t cctr;
	___set_DMAC(ioaddr, CSTR, 0);

	cctr = __get_DMAC(ioaddr, CCTR);
} /* end frv_dma_status_clear() */