/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* map a byte offset into the framebuffer to the struct page backing it */
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

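/*
 * Illustrative note (not in the original file): both branches above are
 * exercised in-tree. A defio driver with no addressable video memory
 * typically vmalloc()s a shadow framebuffer and flags it as virtual,
 * along the lines of (a sketch; the size name is an assumption):
 *
 *	info->screen_base = (char __force __iomem *) vmalloc(videomemorysize);
 *	info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB;
 *
 * while a driver with physically contiguous memory leaves FBINFO_VIRTFB
 * clear, fills info->fix.smem_start, and takes the pfn_to_page() branch.
 */
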
/* find and return the page backing the faulting framebuffer offset */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	mutex_lock(&inode->i_mutex);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	err = schedule_delayed_work(&info->deferred_work, 0);
	mutex_unlock(&inode->i_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

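/*
 * Illustrative userspace sketch (not in the original file): fbmem.c
 * wires fb_deferred_io_fsync() up as the fbdev file's fsync handler
 * when deferred IO is compiled in, so a client can force the deferred
 * IO to run right away instead of waiting out the delay:
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	char *fb = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *	memset(fb, 0xff, len);	// first write faults into mkwrite below
 *	fsync(fd);		// cancels the delay and flushes now
 */
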
/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	file_update_time(vma->vm_file);

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original process's pte is
		marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}

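/*
 * Illustrative sketch (not in the original file): the deferred_io
 * callback runs under fbdefio->lock and walks the sorted pagelist via
 * the same lru linkage used above. The exfb_* names are assumptions:
 *
 *	static void exfb_deferred_io(struct fb_info *info,
 *				     struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		// page->index is the page offset set in the fault handler
 *		list_for_each_entry(page, pagelist, lru)
 *			exfb_write_page(info, page->index << PAGE_SHIFT);
 *	}
 */
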
void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

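/*
 * Illustrative sketch (not in the original file): a driver enables
 * deferred IO before register_framebuffer(). The exfb_* names are
 * assumptions:
 *
 *	static struct fb_deferred_io exfb_defio = {
 *		.delay		= HZ / 4,	// flush at most 4x per second
 *		.deferred_io	= exfb_deferred_io,
 *	};
 *
 *	info->fbdefio = &exfb_defio;
 *	fb_deferred_io_init(info);
 *	register_framebuffer(info);
 *
 * On teardown the order reverses: unregister_framebuffer(), then
 * fb_deferred_io_cleanup().
 */
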
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

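/*
 * Note (not in the original file): when info->fbdefio is set, fbmem.c
 * calls fb_deferred_io_open() from fb_open(), so every opener's mapping
 * picks up fb_deferred_io_aops and its set_page_dirty handler above.
 */
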
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");