flush.c (Linux Kernel 3.7.1)
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

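/*
 * arm64 data caches behave as physically tagged (PIPT), so no D-cache
 * maintenance is needed when an entire address space goes away: stale
 * lines cannot alias a new mapping. flush_cache_mm() is therefore empty.
 */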
void flush_cache_mm(struct mm_struct *mm)
{
}

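/*
 * The D-cache needs no maintenance here either, but the I-cache may be
 * VIPT and can hold stale instructions for a range that is about to be
 * remapped, so invalidate it entirely if the range was executable.
 */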
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

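/*
 * A no-op for the same reason as flush_cache_mm(): with non-aliasing
 * D-caches there is nothing to flush when a single user page is unmapped;
 * I-cache coherency is instead established when a mapping is installed
 * (see __sync_icache_dcache() below).
 */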
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
		      unsigned long pfn)
{
}

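/*
 * Called after the kernel writes into a user page through its kernel
 * alias (see copy_to_user_page() below). For executable mappings the
 * written D-cache lines must reach the point of unification and the
 * I-cache must be invalidated: by range when the I-cache is non-aliasing,
 * or in full when it aliases and a range operation would be insufficient.
 */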
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space. Really, we want to allow our "user space" model to handle
 * this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

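/*
 * Flush the D-cache lines covering the kernel mapping of this page,
 * pushing any dirty data out to memory.
 */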
void __flush_dcache_page(struct page *page)
{
	__flush_dcache_area(page_address(page), PAGE_SIZE);
}

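/*
 * Called when a user PTE for an executable page is installed (from
 * set_pte_at() on arm64). PG_dcache_clean acts as a lazy-flush marker:
 * the first time a page is mapped, clean the D-cache and invalidate the
 * I-cache; on later mappings only an aliasing (AIVIVT) I-cache still needs
 * invalidating, since its contents depend on the virtual address used.
 */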
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	unsigned long pfn;
	struct page *page;

	pfn = pte_pfn(pte);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_page(page);
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping of this
 * page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty cache
	 * lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (mapping && mapping_mapped(mapping)) {
		__flush_dcache_page(page);
		__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	} else {
		clear_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
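
/*
 * Usage sketch (illustrative only; "buf" and "len" stand for caller-supplied
 * data): a typical user of flush_dcache_page() writes to a page-cache page
 * through its kernel mapping before userspace can observe the data:
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, buf, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);
 */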

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(flush_cache_all);
EXPORT_SYMBOL(flush_icache_range);