#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
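/*
 * XIP uses its own zeroed page rather than ZERO_PAGE() directly, so that
 * other users of ZERO_PAGE(), such as /dev/zero, are not disturbed.
 * Allocation of the page is serialized by xip_sparse_mutex, and
 * xip_sparse_seq lets lock-free readers detect a concurrent update.
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);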
static struct page *__xip_sparse_page;
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}
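/*
 * File read routine for execute-in-place files.  All of the low-level
 * work is done by the mapping->a_ops->get_xip_mem() operation; the
 * struct file * is not used at all and may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;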
	size_t copied = 0, error = 0;
	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;
		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;
		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA)
				/* sparse */
				zero = 1;
			else
				goto out;
		}
		/* flush potential user-space aliases before reading the page
		 * on the kernel side if it is also mapped writable */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}
		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	return (copied ? copied : error);
}
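/*
 * Read entry point exported to XIP-capable filesystems; it forwards
 * straight to the helper above.
 */
ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{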
	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
				   buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
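/*
 * Called from the fault and write paths: walk all vmas of the
 * address_space and unmap the __xip_sparse_page wherever it is mapped
 * at pgoff, so the zero page can be replaced by a real block.
 */
static void
__xip_unmap(struct address_space *mapping, unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;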
	count = read_seqcount_begin(&xip_sparse_seq);
	page = __xip_sparse_page;
	if (!page)
		return;
retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			ptep_clear_flush(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			pte_unmap_unlock(pte, ptl);
			/* must invalidate_page _before_ freeing the page */
			mmu_notifier_invalidate_page(mm, address);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);
	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
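/*
 * Fault handler for XIP mappings, derived from filemap_fault(): rather
 * than paging data in through the page cache, it inserts the backing
 * pfn (or the shared sparse zero page) directly into the vma.
 */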
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;
	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;
	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);
found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/* -EBUSY means we raced another thread faulting in this page */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;
		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			/* raced: the block appeared meanwhile, just refault */
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;
		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}
static const struct vm_operations_struct xip_file_vm_ops = {
	.fault		= xip_file_fault,
	.page_mkwrite	= filemap_page_mkwrite,
};
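/*
 * mmap entry point: install the XIP vm_ops and mark the vma
 * VM_MIXEDMAP, since XIP pfns need not have struct page backing.
 */
int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);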
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
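/*
 * Low-level write helper: for each page-sized chunk, look up (or, on a
 * hole, allocate) the backing XIP block and copy the user data straight
 * into it.  The caller holds i_mutex.
 */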
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		 size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;
	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block ... */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* ... and unmap the sparse page from all vmas */
				__xip_unmap(mapping, index);
		}
		if (status)
			break;
		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;
			written += status;
			count -= status;
			pos += status;
			buf += status;
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need for i_size_read() here: i_size cannot change under us
	 * because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return written ? written : status;
}
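/*
 * Exported write entry point: take the superblock write reference and
 * i_mutex, run the generic write checks, then hand off to
 * __xip_file_write() above.
 */
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;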
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;
	sb_start_write(inode->i_sb);
	mutex_lock(&inode->i_mutex);
	pos = *ppos;
	count = len;

	ret = __xip_file_write(filp, buf, count, pos, ppos);
	mutex_unlock(&inode->i_mutex);
	sb_end_write(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
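/*
 * Zero the tail of a partially truncated page for execute-in-place
 * files; only supported for block size == PAGE_SIZE.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;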
	unsigned long xip_pfn;
	int err;
	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);
	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* hole: nothing to zero */
			return 0;
		return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
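/*
 * For illustration only: a minimal sketch of the get_xip_mem()
 * address_space operation that everything above calls into.  The
 * signature matches its use in this file; my_fs_pgoff_to_block() and
 * my_fs_block_to_mem() are hypothetical helpers standing in for a real
 * filesystem's block lookup and direct-access translation.
 */
static int my_fs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
			     int create, void **kmem, unsigned long *pfn)
{
	sector_t block;
	int err;

	/* map the page offset to an on-media block, allocating if asked */
	err = my_fs_pgoff_to_block(mapping->host, pgoff, create, &block);
	if (err)
		return err;	/* -ENODATA reports a hole to the callers above */

	/* translate the block to a kernel address and a pfn */
	return my_fs_block_to_mem(mapping->host, block, kmem, pfn);
}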