Linux Kernel 3.7.1
ivtv-udma.c
/*
    User DMA

    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <[email protected]>
    Copyright (C) 2005-2007 Hans Verkuil <[email protected]>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

/*
 * Work out how the user buffer [first, first + size) maps onto whole
 * pages: the page-aligned start address, the byte offset into the first
 * page, the number of valid bytes in the last page (tail), and the
 * total number of pages spanned.
 */
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
        dma_page->uaddr = first & PAGE_MASK;
        dma_page->offset = first & ~PAGE_MASK;
        dma_page->tail = 1 + ((first + size - 1) & ~PAGE_MASK);
        dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->last = ((first + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        dma_page->page_count = dma_page->last - dma_page->first + 1;
        /* A single-page buffer starts and ends in the same page */
        if (dma_page->page_count == 1)
                dma_page->tail -= dma_page->offset;
}

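/*
 * Worked example (assuming PAGE_SIZE == 4096, PAGE_SHIFT == 12):
 * first = 0x12ef0, size = 0x2000 gives
 *   uaddr      = 0x12000       (page-aligned start)
 *   offset     = 0xef0 (3824)  (bytes into the first page)
 *   first/last = 0x12 / 0x14   (three pages spanned)
 *   page_count = 3
 *   tail       = 0xef0 (3824)  (bytes used in the last page)
 * Sanity check: (4096 - 3824) + 4096 + 3824 = 8192 = size.
 */
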
/*
 * Fill the scatter-gather list with the user pages described by
 * dma_page, starting at entry map_offset.  Returns the index of the
 * next free entry, or a negative value if a bounce page could not be
 * allocated.
 */
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
        int i, offset;
        unsigned long flags;

        if (map_offset < 0)
                return map_offset;

        offset = dma_page->offset;

        /* Fill SG Array with new values */
        for (i = 0; i < dma_page->page_count; i++) {
                unsigned int len = (i == dma_page->page_count - 1) ?
                        dma_page->tail : PAGE_SIZE - offset;

                if (PageHighMem(dma->map[map_offset])) {
                        void *src;

                        if (dma->bouncemap[map_offset] == NULL)
                                dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
                        if (dma->bouncemap[map_offset] == NULL)
                                return -1;
                        local_irq_save(flags);
                        src = kmap_atomic(dma->map[map_offset]) + offset;
                        memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
                        kunmap_atomic(src);
                        local_irq_restore(flags);
                        sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
                } else {
                        sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
                }
                /* Only the first page starts mid-page */
                offset = 0;
                map_offset++;
        }
        return map_offset;
}

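/*
 * Note on the bounce pages above: user pages living in highmem have no
 * permanent kernel mapping, so (as this driver is written) their
 * contents are copied through a lowmem bounce page, allocated on first
 * use and cached in dma->bouncemap for reuse by later transfers; the SG
 * entry then points at the bounce page instead of the user page.
 */
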
/*
 * Copy the mapped scatterlist into the SG array handed to the hardware.
 * Destination addresses start at buffer_offset; once split bytes have
 * been consumed, the destination jumps to buffer_offset_2.  If split is
 * never consumed exactly (e.g. -1), the destination stays contiguous.
 */
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
        int i;
        struct scatterlist *sg;

        for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg++) {
                dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
                dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
                dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
                buffer_offset += sg_dma_len(sg);

                split -= sg_dma_len(sg);
                if (split == 0)
                        buffer_offset = buffer_offset_2;
        }
}

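/*
 * Worked example of the split logic: with three 4096-byte SG entries,
 * buffer_offset = 0x1000, buffer_offset_2 = 0x20000 and split = 8192,
 * the destinations come out as 0x1000, 0x2000 and 0x20000: after the
 * first two entries exhaust the split byte count, the third entry is
 * redirected to the second buffer.  Elsewhere in the driver this is
 * used to send the luma and chroma halves of a YUV frame to two
 * different offsets in a single transfer; ivtv_udma_setup() below
 * passes split = -1, so the u32 counter never hits zero and the
 * destination remains one contiguous range.
 */
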
/* User DMA Buffers */
void ivtv_udma_alloc(struct ivtv *itv)
{
        if (itv->udma.SG_handle == 0) {
                /* Map DMA Page Array Buffer */
                itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
                                sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
                ivtv_udma_sync_for_cpu(itv);
        }
}

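/*
 * Note: SG_handle is the bus address of the SGarray descriptor block;
 * it is what ivtv_udma_start() later writes into IVTV_REG_DECDMAADDR so
 * the hardware can fetch the descriptors.  Mapping it once up front
 * avoids remapping it for every transfer.
 */
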
/*
 * Map the user buffer at userbuf for a host-to-device transfer to
 * decoder address ivtv_dest_addr.  Returns the number of mapped pages
 * on success or a negative error code.
 */
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
        void __user *userbuf, int size_in_bytes)
{
        struct ivtv_dma_page_info user_dma;
        struct ivtv_user_dma *dma = &itv->udma;
        int i, err;

        IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

        /* Still in use from a previous transfer? */
        if (dma->SG_length || dma->page_count) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
                                dma->SG_length, dma->page_count);
                return -EBUSY;
        }

        ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

        if (user_dma.page_count <= 0) {
                IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
                                user_dma.page_count, size_in_bytes, user_dma.offset);
                return -EINVAL;
        }

        /* Pin the user pages for the DMA transfer */
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm,
                        user_dma.uaddr, user_dma.page_count, 0, 1, dma->map, NULL);
        up_read(&current->mm->mmap_sem);

        if (user_dma.page_count != err) {
                IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
                                err, user_dma.page_count);
                if (err >= 0) {
                        /* Partial success: release whatever was pinned */
                        for (i = 0; i < err; i++)
                                put_page(dma->map[i]);
                        return -EINVAL;
                }
                return err;
        }

        dma->page_count = user_dma.page_count;

        /* Fill SG List with new values */
        if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
                for (i = 0; i < dma->page_count; i++)
                        put_page(dma->map[i]);
                dma->page_count = 0;
                return -ENOMEM;
        }

        /* Map SG List */
        dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);

        /* Fill SG Array with new values; -1 means no destination split */
        ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

        /* Tag SG Array with Interrupt Bit */
        dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

        ivtv_udma_sync_for_device(itv);
        return dma->page_count;
}

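/*
 * Illustrative caller (a hypothetical sketch, not part of this file,
 * modeled on how the ivtvfb framebuffer code uses this API; the field
 * names udma.lock and dma_waitq and the IRQ handler behaviour are
 * assumptions taken from the rest of the driver): map the user buffer,
 * kick off the transfer, wait for the interrupt handler to clear
 * IVTV_F_I_DMA and wake the DMA wait queue, then release the pages.
 */
#if 0   /* example only */
static int example_dec_dma_to_device(struct ivtv *itv, unsigned long dest,
        void __user *userbuf, int size)
{
        int ret;

        mutex_lock(&itv->udma.lock);
        ret = ivtv_udma_setup(itv, dest, userbuf, size);
        if (ret <= 0) {
                mutex_unlock(&itv->udma.lock);
                return ret ? ret : -EFAULT;
        }

        ivtv_udma_prepare(itv);
        /* The IRQ handler clears IVTV_F_I_DMA and wakes dma_waitq */
        wait_event(itv->dma_waitq, !test_bit(IVTV_F_I_DMA, &itv->i_flags));

        ivtv_udma_unmap(itv);
        mutex_unlock(&itv->udma.lock);
        return 0;
}
#endif
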
void ivtv_udma_unmap(struct ivtv *itv)
{
        struct ivtv_user_dma *dma = &itv->udma;
        int i;

        IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

        /* Nothing to free */
        if (dma->page_count == 0)
                return;

        /* Unmap Scatterlist */
        if (dma->SG_length) {
                pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
                dma->SG_length = 0;
        }
        /* sync DMA */
        ivtv_udma_sync_for_cpu(itv);

        /* Release User Pages */
        for (i = 0; i < dma->page_count; i++) {
                put_page(dma->map[i]);
        }
        dma->page_count = 0;
}

void ivtv_udma_free(struct ivtv *itv)
{
        int i;

        /* Unmap SG Array */
        if (itv->udma.SG_handle) {
                pci_unmap_single(itv->pdev, itv->udma.SG_handle,
                                sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
        }

        /* Unmap Scatterlist */
        if (itv->udma.SG_length) {
                pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
        }

        for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
                if (itv->udma.bouncemap[i])
                        __free_page(itv->udma.bouncemap[i]);
        }
}

void ivtv_udma_start(struct ivtv *itv)
{
        IVTV_DEBUG_DMA("start UDMA\n");
        write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        set_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

void ivtv_udma_prepare(struct ivtv *itv)
{
        unsigned long flags;

        spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                ivtv_udma_start(itv);
        else
                set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
        spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
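
/*
 * Start/prepare act as a tiny state machine: ivtv_udma_prepare() starts
 * the transfer immediately if no DMA is in flight, otherwise it sets
 * IVTV_F_I_UDMA_PENDING so that, elsewhere in the driver, the interrupt
 * handler can start the queued user DMA once the current transfer
 * completes.
 */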