#include <asm/cacheflush.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
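/*
 * Page-count threshold above which the cache sync code falls back to a full
 * cache flush instead of invalidating only the buffer's address range. Set
 * to 0, the ranged path is effectively disabled and every buffer gets the
 * full flush.
 */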
#define ISP_CACHE_FLUSH_PAGES_MAX	0
        if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
            buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
                flush_cache_all();
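        /* Small-buffer path: invalidate the outer (L2) cache over the
         * buffer's userspace address range.
         */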
        outer_inv_range(buf->vbuf.m.userptr,
                        buf->vbuf.m.userptr + buf->vbuf.length);
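/*
 * Lock or unlock every VMA covering the userspace buffer so the mapping
 * cannot be torn down while the device owns the pages. Without a memory
 * management context (e.g. when called from a workqueue on task exit),
 * locking fails with -EINVAL while unlocking is a harmless no-op.
 */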
        return lock ? -EINVAL : 0;
        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
        spin_lock(&current->mm->page_table_lock);
        } while (vma->vm_end < end);
        spin_unlock(&current->mm->page_table_lock);
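/*
 * Build a scatterlist for a vmalloc'ed kernel buffer: one entry per page,
 * rejecting pages that live in highmem and thus lack a permanent kernel
 * mapping.
 */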
        sglist = vmalloc(npages * sizeof(*sglist));
        for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
                if (page == NULL || PageHighMem(page)) {
                sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
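        /* The same highmem restriction applies to pages pinned from a
         * userspace (USERPTR) buffer.
         */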
        for (i = 0; i < buf->npages; ++i) {
                if (PageHighMem(buf->pages[i])) {
        unsigned int offset = buf->offset;
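        /* VM_PFNMAP buffers are physically contiguous, so the scatterlist
         * is filled by walking consecutive page frame numbers.
         */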
        for (i = 0; i < buf->npages; ++i, ++pfn) {
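        /* Give the driver a chance to release its own per-buffer resources
         * before the generic teardown.
         */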
        if (buf->queue->ops->buffer_cleanup)
                buf->queue->ops->buffer_cleanup(buf);
        isp_video_buffer_lock_vma(buf, 0);
        for (i = 0; i < buf->npages; ++i)
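        /* A USERPTR buffer need not start on a page boundary: the number of
         * pages to pin is derived from the indices of the first and last
         * pages the buffer touches.
         */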
        data = buf->vbuf.m.userptr;
        buf->npages = last - first + 1;
        isp_video_buffer_cleanup(buf);
        ret = isp_video_buffer_lock_vma(buf, 1);
        isp_video_buffer_cleanup(buf);
        unsigned long prev_pfn;
        unsigned long this_pfn;
        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
                else if (this_pfn != prev_pfn + 1) {
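                        /* A gap in the PFN sequence means the area is not
                         * physically contiguous and can't be used for DMA.
                         */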
        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
                if (start == buf->vbuf.m.userptr) {
        } while (vma->vm_end < end);
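/*
 * Buffer preparation dispatches on the V4L2 memory type: MMAP buffers are
 * vmalloc'ed in the kernel, while USERPTR buffers are either pinned page by
 * page with get_user_pages() or, for VM_PFNMAP areas, translated directly
 * from page frame numbers.
 */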
        switch (buf->vbuf.memory) {
                ret = isp_video_buffer_sglist_kernel(buf);
                ret = isp_video_buffer_prepare_vm_flags(buf);
                ret = isp_video_buffer_prepare_pfnmap(buf);
                ret = isp_video_buffer_sglist_pfnmap(buf);
                ret = isp_video_buffer_prepare_user(buf);
                ret = isp_video_buffer_sglist_user(buf);
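        /* The complete scatterlist must map; a partial dma_map_sg() result
         * is treated as failure.
         */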
        if (ret != buf->sglen) {
        if (buf->queue->ops->buffer_prepare)
                ret = buf->queue->ops->buffer_prepare(buf);
        isp_video_buffer_cleanup(buf);
        switch (buf->state) {
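/*
 * Wait for the buffer to leave the queued/active states; in non-blocking
 * mode a buffer that is still owned by the driver yields -EAGAIN instead.
 */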
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
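        /* Buffers that userspace still has mmapped cannot be freed. */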
        for (i = 0; i < queue->count; ++i) {
                if (queue->buffers[i]->vma_use_count != 0)
        for (i = 0; i < queue->count; ++i) {
                isp_video_buffer_cleanup(buf);
        INIT_LIST_HEAD(&queue->queue);
                                 unsigned int nbuffers,
        ret = isp_video_queue_free(queue);
        for (i = 0; i < nbuffers; ++i) {
        return isp_video_queue_free(queue);
        INIT_LIST_HEAD(&queue->queue);
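/*
 * VIDIOC_REQBUFS: the driver's queue_prepare() hook may clamp the requested
 * buffer count and must report the per-buffer size before allocation.
 */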
        unsigned int nbuffers = rb->count;
        queue->ops->queue_prepare(queue, &nbuffers, &size);
        ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
        isp_video_buffer_query(buf, vbuf);
        isp_video_buffer_cleanup(buf);
        ret = isp_video_buffer_prepare(buf);
        isp_video_buffer_cache_sync(buf);
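        /* When the stream is already running, hand the buffer straight to
         * the driver under the IRQ-safe queue lock.
         */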
                queue->ops->buffer_queue(buf);
                spin_unlock_irqrestore(&queue->irqlock, flags);
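/*
 * VIDIOC_DQBUF: wait for the oldest buffer on the queue to complete, then
 * report its final state back to userspace.
 */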
        if (list_empty(&queue->queue)) {
        ret = isp_video_buffer_wait(buf, nonblocking);
        isp_video_buffer_query(buf, vbuf);
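        /* VIDIOC_STREAMON: hand the buffers queued so far to the driver. */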
                queue->ops->buffer_queue(buf);
        spin_unlock_irqrestore(&queue->irqlock, flags);
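/*
 * VIDIOC_STREAMOFF: return every buffer to the idle state and empty the
 * queue.
 */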
        for (i = 0; i < queue->count; ++i) {
        spin_unlock_irqrestore(&queue->irqlock, flags);
        INIT_LIST_HEAD(&queue->queue);
        for (i = 0; i < queue->count; ++i) {
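/*
 * The mmap open/close handlers are expected to maintain the vma_use_count
 * reference count checked in isp_video_queue_free() above.
 */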
static const struct vm_operations_struct isp_video_queue_vm_ops = {
        .open = isp_video_queue_vm_open,
        .close = isp_video_queue_vm_close,
};
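        /* Locate the buffer targeted by this mmap() request; running off
         * the end of the array means the requested offset matched no buffer.
         */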
        for (i = 0; i < queue->count; ++i) {
        if (i == queue->count) {
        vma->vm_ops = &isp_video_queue_vm_ops;
        isp_video_queue_vm_open(vma);
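/*
 * poll(): readiness is derived from the state of the oldest buffer on the
 * queue; an empty queue is reported as an error condition.
 */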
        unsigned int mask = 0;
        if (list_empty(&queue->queue)) {
        poll_wait(file, &buf->wait, wait);