Linux Kernel 3.7.1
tsi721_dma.c
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright 2011 Integrated Device Technology, Inc.
 * Alexandre Bounine <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>

#include "tsi721.h"

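/* Helpers that map generic DMA engine objects onto their Tsi721-specific
 * container structures.
 */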
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
        return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
        return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct tsi721_tx_desc, txd);
}

static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
                                struct tsi721_bdma_chan *bdma_chan)
{
        return list_first_entry(&bdma_chan->active_list,
                                struct tsi721_tx_desc, desc_node);
}

static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
{
        struct tsi721_dma_desc *bd_ptr;
        struct device *dev = bdma_chan->dchan.device->dev;
        u64 *sts_ptr;
        dma_addr_t bd_phys;
        dma_addr_t sts_phys;
        int sts_size;
        int bd_num = bdma_chan->bd_num;

        dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

        /* Allocate space for DMA descriptors */
        bd_ptr = dma_zalloc_coherent(dev,
                                bd_num * sizeof(struct tsi721_dma_desc),
                                &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
                return -ENOMEM;

        bdma_chan->bd_phys = bd_phys;
        bdma_chan->bd_base = bd_ptr;

        dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);

        /* Allocate space for descriptor status FIFO */
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
        sts_ptr = dma_zalloc_coherent(dev,
                                sts_size * sizeof(struct tsi721_dma_sts),
                                &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
                /* Free space allocated for DMA descriptors */
                dma_free_coherent(dev,
                                bd_num * sizeof(struct tsi721_dma_desc),
                                bd_ptr, bd_phys);
                bdma_chan->bd_base = NULL;
                return -ENOMEM;
        }

        bdma_chan->sts_phys = sts_phys;
        bdma_chan->sts_base = sts_ptr;
        bdma_chan->sts_size = sts_size;

        dev_dbg(dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);

        /* Initialize DMA descriptors ring */
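        /* The last BD is a DTYPE3 "link" descriptor whose next pointer wraps
         * back to the start of the ring.
         */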
        bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
        bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
                                                 TSI721_DMAC_DPTRL_MASK);
        bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);

        /* Setup DMA descriptor pointers */
        iowrite32(((u64)bd_phys >> 32),
                  bdma_chan->regs + TSI721_DMAC_DPTRH);
        iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
                  bdma_chan->regs + TSI721_DMAC_DPTRL);

        /* Setup descriptor status FIFO */
        iowrite32(((u64)sts_phys >> 32),
                  bdma_chan->regs + TSI721_DMAC_DSBH);
        iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
                  bdma_chan->regs + TSI721_DMAC_DSBL);
        iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
                  bdma_chan->regs + TSI721_DMAC_DSSZ);

        /* Clear interrupt bits */
        iowrite32(TSI721_DMAC_INT_ALL,
                  bdma_chan->regs + TSI721_DMAC_INT);

        ioread32(bdma_chan->regs + TSI721_DMAC_INT);

        /* Toggle DMA channel initialization */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
        ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
        bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
        bdma_chan->sts_rdptr = 0;
        udelay(10);

        return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
        u32 ch_stat;

        if (bdma_chan->bd_base == NULL)
                return 0;

        /* Check if DMA channel still running */
        ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        if (ch_stat & TSI721_DMAC_STS_RUN)
                return -EFAULT;

        /* Put DMA channel into init state */
        iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

        /* Free space allocated for DMA descriptors */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
                bdma_chan->bd_base, bdma_chan->bd_phys);
        bdma_chan->bd_base = NULL;

        /* Free space allocated for status FIFO */
        dma_free_coherent(bdma_chan->dchan.device->dev,
                bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
                bdma_chan->sts_base, bdma_chan->sts_phys);
        bdma_chan->sts_base = NULL;
        return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
        if (enable) {
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
                ioread32(bdma_chan->regs + TSI721_DMAC_INT);
                /* Enable BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INTE);
        } else {
                /* Disable BDMA channel interrupts */
                iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
                /* Clear pending BDMA channel interrupts */
                iowrite32(TSI721_DMAC_INT_ALL,
                        bdma_chan->regs + TSI721_DMAC_INT);
        }

}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
        u32 sts;

        sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
        return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
        /* Disable BDMA channel interrupts */
        iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);

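        /* Completion processing is deferred to the tasklet, which re-enables
         * the channel interrupts when it finishes.
         */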
        tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI

/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
        struct tsi721_bdma_chan *bdma_chan = ptr;

        tsi721_bdma_handler(bdma_chan);
        return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
        if (!tsi721_dma_is_idle(bdma_chan)) {
                dev_err(bdma_chan->dchan.device->dev,
                        "BUG: Attempt to start non-idle channel\n");
                return;
        }

        if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
                dev_err(bdma_chan->dchan.device->dev,
                        "BUG: Attempt to start DMA with no BDs ready\n");
                return;
        }

        dev_dbg(bdma_chan->dchan.device->dev,
                "tx_chan: %p, chan: %d, regs: %p\n",
                bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
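        /* Writing the updated descriptor count to DWRCNT tells the channel
         * how many BDs are ready to execute; the read back flushes the
         * posted write before the cached count is updated.
         */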

        iowrite32(bdma_chan->wr_count_next,
                bdma_chan->regs + TSI721_DMAC_DWRCNT);
        ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

        bdma_chan->wr_count = bdma_chan->wr_count_next;
}

static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
                            struct tsi721_tx_desc *desc)
{
        dev_dbg(bdma_chan->dchan.device->dev,
                "Put desc: %p into free list\n", desc);

        if (desc) {
                spin_lock_bh(&bdma_chan->lock);
                list_splice_init(&desc->tx_list, &bdma_chan->free_list);
                list_add(&desc->desc_node, &bdma_chan->free_list);
                bdma_chan->wr_count_next = bdma_chan->wr_count;
                spin_unlock_bh(&bdma_chan->lock);
        }
}

static
struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
{
        struct tsi721_tx_desc *tx_desc, *_tx_desc;
        struct tsi721_tx_desc *ret = NULL;
        int i;

        spin_lock_bh(&bdma_chan->lock);
        list_for_each_entry_safe(tx_desc, _tx_desc,
                                 &bdma_chan->free_list, desc_node) {
                if (async_tx_test_ack(&tx_desc->txd)) {
                        list_del(&tx_desc->desc_node);
                        ret = tx_desc;
                        break;
                }
                dev_dbg(bdma_chan->dchan.device->dev,
                        "desc %p not ACKed\n", tx_desc);
        }

        i = bdma_chan->wr_count_next % bdma_chan->bd_num;
        if (i == bdma_chan->bd_num - 1) {
                i = 0;
                bdma_chan->wr_count_next++; /* skip link descriptor */
        }

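        /* Reserve the next hardware BD slot in the ring and bind it to the
         * logical descriptor taken from the free list.
         */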
        bdma_chan->wr_count_next++;
        tx_desc->txd.phys = bdma_chan->bd_phys +
                                i * sizeof(struct tsi721_dma_desc);
        tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];

        spin_unlock_bh(&bdma_chan->lock);

        return ret;
}

static int
tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
        struct tsi721_tx_desc *desc, struct scatterlist *sg,
        enum dma_rtype rtype, u32 sys_size)
{
        struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
        u64 rio_addr;

        if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
                dev_err(bdma_chan->dchan.device->dev,
                        "SG element is too large\n");
                return -EINVAL;
        }

        dev_dbg(bdma_chan->dchan.device->dev,
                "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
                (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
                sg_dma_len(sg));

        dev_dbg(bdma_chan->dchan.device->dev,
                "bd_ptr = %p did=%d raddr=0x%llx\n",
                bd_ptr, desc->destid, desc->rio_addr);

        /* Initialize DMA descriptor */
        bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
                                        (rtype << 19) | desc->destid);
        if (desc->interrupt)
                bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
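        /* The two low-order bits of the RapidIO address and the system size
         * are packed into the BCOUNT word along with the byte count; the
         * remaining address bits go into RADDR_LO/HI shifted right by two.
         */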
        bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
                                        (sys_size << 26) | sg_dma_len(sg));
        rio_addr = (desc->rio_addr >> 2) |
                                ((u64)(desc->rio_addr_u & 0x3) << 62);
        bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
        bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
        bd_ptr->t1.bufptr_lo = cpu_to_le32(
                                (u64)sg_dma_address(sg) & 0xffffffff);
        bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
        bd_ptr->t1.s_dist = 0;
        bd_ptr->t1.s_size = 0;

        return 0;
}

static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
                                      struct tsi721_tx_desc *desc)
{
        struct dma_async_tx_descriptor *txd = &desc->txd;
        dma_async_tx_callback callback = txd->callback;
        void *param = txd->callback_param;

        list_splice_init(&desc->tx_list, &bdma_chan->free_list);
        list_move(&desc->desc_node, &bdma_chan->free_list);
        bdma_chan->completed_cookie = txd->cookie;

        if (callback)
                callback(param);
}

static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
{
        struct tsi721_tx_desc *desc, *_d;
        LIST_HEAD(list);

        BUG_ON(!tsi721_dma_is_idle(bdma_chan));

        if (!list_empty(&bdma_chan->queue))
                tsi721_start_dma(bdma_chan);

        list_splice_init(&bdma_chan->active_list, &list);
        list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                tsi721_dma_chain_complete(bdma_chan, desc);
}

static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
        u32 srd_ptr;
        u64 *sts_ptr;
        int i, j;

        /* Check and clear descriptor status FIFO entries */
        srd_ptr = bdma_chan->sts_rdptr;
        sts_ptr = bdma_chan->sts_base;
        j = srd_ptr * 8;
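        /* Each status FIFO line holds eight 64-bit entries; zero out consumed
         * entries and advance the software read pointer one line at a time.
         */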
        while (sts_ptr[j]) {
                for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
                        sts_ptr[j] = 0;

                ++srd_ptr;
                srd_ptr %= bdma_chan->sts_size;
                j = srd_ptr * 8;
        }

        iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
        bdma_chan->sts_rdptr = srd_ptr;
}

static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
        if (list_empty(&bdma_chan->active_list) ||
                list_is_singular(&bdma_chan->active_list)) {
                dev_dbg(bdma_chan->dchan.device->dev,
                        "%s: Active_list empty\n", __func__);
                tsi721_dma_complete_all(bdma_chan);
        } else {
                dev_dbg(bdma_chan->dchan.device->dev,
                        "%s: Active_list NOT empty\n", __func__);
                tsi721_dma_chain_complete(bdma_chan,
                                        tsi721_dma_first_active(bdma_chan));
                tsi721_start_dma(bdma_chan);
        }
}

static void tsi721_dma_tasklet(unsigned long data)
{
        struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
        u32 dmac_int, dmac_sts;

        dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
        dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
                __func__, bdma_chan->id, dmac_int);
        /* Clear channel interrupts */
        iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

        if (dmac_int & TSI721_DMAC_INT_ERR) {
                dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
                dev_err(bdma_chan->dchan.device->dev,
                        "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
                        __func__, bdma_chan->id, dmac_sts);
        }

        if (dmac_int & TSI721_DMAC_INT_STFULL) {
                dev_err(bdma_chan->dchan.device->dev,
                        "%s: DMAC%d descriptor status FIFO is full\n",
                        __func__, bdma_chan->id);
        }

        if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
                tsi721_clr_stat(bdma_chan);
                spin_lock(&bdma_chan->lock);
                tsi721_advance_work(bdma_chan);
                spin_unlock(&bdma_chan->lock);
        }

        /* Re-Enable BDMA channel interrupts */
        iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&bdma_chan->lock);

        cookie = txd->chan->cookie;
        if (++cookie < 0)
                cookie = 1;
        txd->chan->cookie = cookie;
        txd->cookie = cookie;

        if (list_empty(&bdma_chan->active_list)) {
                list_add_tail(&desc->desc_node, &bdma_chan->active_list);
                tsi721_start_dma(bdma_chan);
        } else {
                list_add_tail(&desc->desc_node, &bdma_chan->queue);
        }

        spin_unlock_bh(&bdma_chan->lock);
        return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
        struct tsi721_tx_desc *desc = NULL;
        LIST_HEAD(tmp_list);
        int i;
        int rc;

        if (bdma_chan->bd_base)
                return bdma_chan->bd_num - 1;

        /* Initialize BDMA channel */
        if (tsi721_bdma_ch_init(bdma_chan)) {
                dev_err(dchan->device->dev, "Unable to initialize data DMA"
                        " channel %d, aborting\n", bdma_chan->id);
                return -ENOMEM;
        }

        /* Allocate matching number of logical descriptors */
        desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
                        GFP_KERNEL);
        if (!desc) {
                dev_err(dchan->device->dev,
                        "Failed to allocate logical descriptors\n");
                rc = -ENOMEM;
                goto err_out;
        }

        bdma_chan->tx_desc = desc;

        for (i = 0; i < bdma_chan->bd_num - 1; i++) {
                dma_async_tx_descriptor_init(&desc[i].txd, dchan);
                desc[i].txd.tx_submit = tsi721_tx_submit;
                desc[i].txd.flags = DMA_CTRL_ACK;
                INIT_LIST_HEAD(&desc[i].tx_list);
                list_add_tail(&desc[i].desc_node, &tmp_list);
        }

        spin_lock_bh(&bdma_chan->lock);
        list_splice(&tmp_list, &bdma_chan->free_list);
        bdma_chan->completed_cookie = dchan->cookie = 1;
        spin_unlock_bh(&bdma_chan->lock);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                /* Request interrupt service if we are in MSI-X mode */
                rc = request_irq(
                        priv->msix[TSI721_VECT_DMA0_DONE +
                                   bdma_chan->id].vector,
                        tsi721_bdma_msix, 0,
                        priv->msix[TSI721_VECT_DMA0_DONE +
                                   bdma_chan->id].irq_name,
                        (void *)bdma_chan);

                if (rc) {
                        dev_dbg(dchan->device->dev,
                                "Unable to allocate MSI-X interrupt for "
                                "BDMA%d-DONE\n", bdma_chan->id);
                        goto err_out;
                }

                rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                            bdma_chan->id].vector,
                        tsi721_bdma_msix, 0,
                        priv->msix[TSI721_VECT_DMA0_INT +
                                   bdma_chan->id].irq_name,
                        (void *)bdma_chan);

                if (rc) {
                        dev_dbg(dchan->device->dev,
                                "Unable to allocate MSI-X interrupt for "
                                "BDMA%d-INT\n", bdma_chan->id);
                        free_irq(
                                priv->msix[TSI721_VECT_DMA0_DONE +
                                           bdma_chan->id].vector,
                                (void *)bdma_chan);
                        rc = -EIO;
                        goto err_out;
                }
        }
#endif /* CONFIG_PCI_MSI */

        tasklet_enable(&bdma_chan->tasklet);
        tsi721_bdma_interrupt_enable(bdma_chan, 1);

        return bdma_chan->bd_num - 1;

err_out:
        kfree(desc);
        tsi721_bdma_ch_free(bdma_chan);
        return rc;
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
#ifdef CONFIG_PCI_MSI
        struct tsi721_device *priv = to_tsi721(dchan->device);
#endif
        LIST_HEAD(list);

        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

        if (bdma_chan->bd_base == NULL)
                return;

        BUG_ON(!list_empty(&bdma_chan->active_list));
        BUG_ON(!list_empty(&bdma_chan->queue));

        tasklet_disable(&bdma_chan->tasklet);

        spin_lock_bh(&bdma_chan->lock);
        list_splice_init(&bdma_chan->free_list, &list);
        spin_unlock_bh(&bdma_chan->lock);

        tsi721_bdma_interrupt_enable(bdma_chan, 0);

#ifdef CONFIG_PCI_MSI
        if (priv->flags & TSI721_USING_MSIX) {
                free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
                                    bdma_chan->id].vector, (void *)bdma_chan);
                free_irq(priv->msix[TSI721_VECT_DMA0_INT +
                                    bdma_chan->id].vector, (void *)bdma_chan);
        }
#endif /* CONFIG_PCI_MSI */

        tsi721_bdma_ch_free(bdma_chan);
        kfree(bdma_chan->tx_desc);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
                                 struct dma_tx_state *txstate)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        dma_cookie_t last_used;
        dma_cookie_t last_completed;
        int ret;

        spin_lock_bh(&bdma_chan->lock);
        last_completed = bdma_chan->completed_cookie;
        last_used = dchan->cookie;
        spin_unlock_bh(&bdma_chan->lock);

        ret = dma_async_is_complete(cookie, last_completed, last_used);

        dma_set_tx_state(txstate, last_completed, last_used, 0);

        dev_dbg(dchan->device->dev,
                "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
                __func__, ret, last_completed, last_used);

        return ret;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

        if (tsi721_dma_is_idle(bdma_chan)) {
                spin_lock_bh(&bdma_chan->lock);
                tsi721_advance_work(bdma_chan);
                spin_unlock_bh(&bdma_chan->lock);
        } else
                dev_dbg(dchan->device->dev,
                        "%s: DMA channel still busy\n", __func__);
}

static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
                        struct scatterlist *sgl, unsigned int sg_len,
                        enum dma_transfer_direction dir, unsigned long flags,
                        void *tinfo)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc = NULL;
        struct tsi721_tx_desc *first = NULL;
        struct scatterlist *sg;
        struct rio_dma_ext *rext = tinfo;
        u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
        unsigned int i;
        u32 sys_size = dma_to_mport(dchan->device)->sys_size;
        enum dma_rtype rtype;

        if (!sgl || !sg_len) {
                dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
                return NULL;
        }

        if (dir == DMA_DEV_TO_MEM)
                rtype = NREAD;
        else if (dir == DMA_MEM_TO_DEV) {
                switch (rext->wr_type) {
                case RDW_ALL_NWRITE:
                        rtype = ALL_NWRITE;
                        break;
                case RDW_ALL_NWRITE_R:
                        rtype = ALL_NWRITE_R;
                        break;
                case RDW_LAST_NWRITE_R:
                default:
                        rtype = LAST_NWRITE_R;
                        break;
                }
        } else {
                dev_err(dchan->device->dev,
                        "%s: Unsupported DMA direction option\n", __func__);
                return NULL;
        }

        for_each_sg(sgl, sg, sg_len, i) {
                int err;

                dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
                desc = tsi721_desc_get(bdma_chan);
                if (!desc) {
                        dev_err(dchan->device->dev,
                                "Not enough descriptors available\n");
                        goto err_desc_get;
                }

                if (sg_is_last(sg))
                        desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
                else
                        desc->interrupt = false;

                desc->destid = rext->destid;
                desc->rio_addr = rio_addr;
                desc->rio_addr_u = 0;

                err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
                if (err) {
                        dev_err(dchan->device->dev,
                                "Failed to build desc: %d\n", err);
                        goto err_desc_get;
                }

                rio_addr += sg_dma_len(sg);

                if (!first)
                        first = desc;
                else
                        list_add_tail(&desc->desc_node, &first->tx_list);
        }

        first->txd.cookie = -EBUSY;
        desc->txd.flags = flags;

        return &first->txd;

err_desc_get:
        tsi721_desc_put(bdma_chan, first);
        return NULL;
}

static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                             unsigned long arg)
{
        struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
        struct tsi721_tx_desc *desc, *_d;
        LIST_HEAD(list);

        dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        spin_lock_bh(&bdma_chan->lock);

        /* make sure to stop the transfer */
        iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);

        list_splice_init(&bdma_chan->active_list, &list);
        list_splice_init(&bdma_chan->queue, &list);

        list_for_each_entry_safe(desc, _d, &list, desc_node)
                tsi721_dma_chain_complete(bdma_chan, desc);

        spin_unlock_bh(&bdma_chan->lock);

        return 0;
}

int tsi721_register_dma(struct tsi721_device *priv)
{
        int i;
        int nr_channels = TSI721_DMA_MAXCH;
        int err;
        struct rio_mport *mport = priv->mport;

        mport->dma.dev = &priv->pdev->dev;
        mport->dma.chancnt = nr_channels;

        INIT_LIST_HEAD(&mport->dma.channels);

        for (i = 0; i < nr_channels; i++) {
                struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

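                /* The channel reserved for RapidIO maintenance transactions
                 * is not exposed through the DMA engine.
                 */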
                if (i == TSI721_DMACH_MAINT)
                        continue;

                bdma_chan->bd_num = 64;
                bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

                bdma_chan->dchan.device = &mport->dma;
                bdma_chan->dchan.cookie = 1;
                bdma_chan->dchan.chan_id = i;
                bdma_chan->id = i;

                spin_lock_init(&bdma_chan->lock);

                INIT_LIST_HEAD(&bdma_chan->active_list);
                INIT_LIST_HEAD(&bdma_chan->queue);
                INIT_LIST_HEAD(&bdma_chan->free_list);

                tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
                             (unsigned long)bdma_chan);
                tasklet_disable(&bdma_chan->tasklet);
                list_add_tail(&bdma_chan->dchan.device_node,
                              &mport->dma.channels);
        }

        dma_cap_zero(mport->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

        mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
        mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
        mport->dma.device_tx_status = tsi721_tx_status;
        mport->dma.device_issue_pending = tsi721_issue_pending;
        mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
        mport->dma.device_control = tsi721_device_control;

        err = dma_async_device_register(&mport->dma);
        if (err)
                dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

        return err;
}