Linux Kernel 3.7.1
arch/arm/include/asm/pgtable.h
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include <asm-generic/4level-fixup.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END 0xff000000UL
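
/*
 * Worked example (hypothetical value, not from this header): if high_memory
 * were 0xc7800000, VMALLOC_START would evaluate to
 *
 *	(0xc7800000 + 0x00800000) & ~0x007fffff == 0xc8000000
 *
 * leaving the vmalloc area spanning 0xc8000000..VMALLOC_END (0xff000000).
 */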

#define LIBRARY_TEXT_START 0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping to be placed.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS PAGE_SIZE

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture-dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;

#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))

#define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC pgprot_kernel
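
/*
 * Expansion example: PAGE_READONLY above is simply
 *
 *	__pgprot(pgprot_val(pgprot_user) | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
 *
 * i.e. the boot-time pgprot_user value with the user, read-only and
 * execute-never software PTE bits ORed in.
 */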

#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
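
/*
 * Illustrative use (a minimal driver-style sketch, not part of this
 * header; "example_mmap" is a made-up name): an mmap() implementation
 * typically applies one of the pgprot_*() helpers above before remapping:
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */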

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) Write implies read permissions
 */
#define __P000 __PAGE_NONE
#define __P001 __PAGE_READONLY
#define __P010 __PAGE_COPY
#define __P011 __PAGE_COPY
#define __P100 __PAGE_READONLY_EXEC
#define __P101 __PAGE_READONLY_EXEC
#define __P110 __PAGE_COPY_EXEC
#define __P111 __PAGE_COPY_EXEC

#define __S000 __PAGE_NONE
#define __S001 __PAGE_READONLY
#define __S010 __PAGE_SHARED
#define __S011 __PAGE_SHARED
#define __S100 __PAGE_READONLY_EXEC
#define __S101 __PAGE_READONLY_EXEC
#define __S110 __PAGE_SHARED_EXEC
#define __S111 __PAGE_SHARED_EXEC
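
/*
 * Example: a private PROT_READ|PROT_WRITE mapping indexes __P011, i.e.
 * __PAGE_COPY: the pte is created read-only, so the first write faults
 * and triggers copy-on-write.  The same protection bits on a MAP_SHARED
 * mapping index __S011 == __PAGE_SHARED, which is directly writable.
 */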

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
#else
#define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte) kunmap_atomic(pte)
#endif
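
/*
 * Note (sketch, not from the original header): with CONFIG_HIGHPTE the
 * pte pages may live in highmem, so any access must be bracketed by the
 * map/unmap pair above:
 *
 *	pte_t *pte = __pte_map(pmd);	(kmap_atomic() under CONFIG_HIGHPTE)
 *	pte_t first = pte[0];
 *	__pte_unmap(pte);
 */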

#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte) __pte_unmap(pte)

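/*
 * Putting the lookup helpers together, a minimal sketch (not from the
 * original header) of walking down to the pte that maps a user address;
 * the pud level is folded away by <asm-generic/pgtable-nopud.h>:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 *	if (!pmd_none(*pmd)) {
 *		pte_t *ptep = pte_offset_map(pmd, addr);
 *		int mapped = pte_present(*ptep);
 *		pte_unmap(ptep);
 *	}
 */
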
#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)

#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte) (0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

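/*
 * set_pte_at() marks user mappings non-global (PTE_EXT_NG) so the TLB
 * entries are tagged with the current ASID, and keeps the I-cache
 * coherent with the D-cache for user pages via __sync_icache_dcache().
 */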
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long ext = 0;

	if (addr < TASK_SIZE && pte_present_user(pteval)) {
		__sync_icache_dcache(pteval);
		ext |= PTE_EXT_NG;
	}

	set_pte_ext(ptep, pteval, ext);
}

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
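
/*
 * Usage sketch (not from the original header): mprotect()-style code
 * changes only the protection while preserving the page frame and the
 * young/dirty state:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * Only L_PTE_XN, L_PTE_RDONLY and L_PTE_USER are taken from the new
 * protection; all other bits of the old pte are kept.
 */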

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------------> < type -> 0 0 0
 *
 * This gives us up to 31 swap files and 64GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
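
/*
 * Round-trip example (hypothetical values): for type 2 and offset 0x1234,
 *
 *	__swp_entry(2, 0x1234).val == (2 << 3) | (0x1234 << 8) == 0x123410
 *	__swp_type(__swp_entry(2, 0x1234))   == 2
 *	__swp_offset(__swp_entry(2, 0x1234)) == 0x1234
 *
 * Bits [2:0] stay zero, so a swap pte is never seen as present
 * (L_PTE_PRESENT is clear).
 */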

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/*
 * Encode and decode a file entry.  File entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <----------------------- offset ------------------------> 1 0 0
 */
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS 29
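
/*
 * Example: the offset round-trips through bits [31:3] of the 32-bit pte
 * shown above, which is where PTE_FILE_MAX_BITS == 29 comes from
 * (32 bits minus the 3 low status bits):
 *
 *	pte_to_pgoff(pgoff_to_pte(n)) == n	for any n < (1 << 29)
 *
 * L_PTE_FILE in bit 2 is what distinguishes a file pte from a swap entry.
 */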

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */