Linux Kernel 3.7.1
pgtable.h
/* MN10300 Page table manipulators and constants
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree for the purposes of the MN10300 TLB handler
 * functions.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm/cpu-regs.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/threads.h>

#include <asm/bitops.h>

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

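/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a read fault on a never-written anonymous page can be satisfied by mapping
 * the shared zero page rather than allocating fresh memory. The helper name
 * is hypothetical.
 */
static inline struct page *example_zero_fault_page(unsigned long vaddr)
{
	/* all zero-fill mappings share the one empty_zero_page */
	return ZERO_PAGE(vaddr);
}
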
extern void pmd_ctor(void *, struct kmem_cache *, unsigned long);
extern void pgtable_cache_init(void);
extern void paging_init(void);

#endif /* !__ASSEMBLY__ */

/*
 * The Linux mn10300 paging architecture implements only the traditional
 * 2-level page tables
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
#define PTRS_PER_PUD 1 /* we don't really have any PUD physically */
#define PTRS_PER_PMD 1 /* we don't really have any PMD physically */
#define PTRS_PER_PTE 1024

#define PGD_SIZE PAGE_SIZE
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT 22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024 - BOOT_USER_PGD_PTRS)

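/*
 * Worked example (editor's addition): with PGDIR_SHIFT == 22 and 4kB pages,
 * a 32-bit virtual address splits into a 10-bit PGD index, a 10-bit PTE
 * index and a 12-bit byte offset:
 *
 *	31         22 21         12 11          0
 *	+-------------+-------------+-------------+
 *	|  PGD index  |  PTE index  | page offset |
 *	+-------------+-------------+-------------+
 *
 * e.g. address 0x70482ABC has PGD index 0x1C1, PTE index 0x082 and
 * offset 0xABC.
 */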
#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif

/*
 * Unfortunately, due to the way the MMU works on the MN10300, the vmalloc VM
 * area has to be in the lower half of the virtual address range (the upper
 * half is not translated through the TLB).
 *
 * So in this case, the vmalloc area goes at the bottom of the address map
 * (leaving a hole at the very bottom to catch addressing errors), and
 * userspace starts immediately above.
 *
 * The vmalloc() routines also leave a hole of 4kB between each vmalloced
 * area to catch addressing errors.
 */
#ifndef __ASSEMBLY__
#define VMALLOC_OFFSET (8UL * 1024 * 1024)
#define VMALLOC_START (0x70000000UL)
#define VMALLOC_END (0x7C000000UL)
#else
#define VMALLOC_OFFSET (8 * 1024 * 1024)
#define VMALLOC_START (0x70000000)
#define VMALLOC_END (0x7C000000)
#endif

#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif

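#ifndef __ASSEMBLY__
/*
 * Illustrative sketch (editor's addition): since vmalloc space sits below
 * userspace on this architecture, classifying an address is a simple range
 * check. The helper name is hypothetical.
 */
static inline int example_addr_is_vmalloc(unsigned long addr)
{
	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
#endif
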
/* IPTEL2/DPTEL2 bit assignments */
#define _PAGE_BIT_VALID xPTEL2_V_BIT
#define _PAGE_BIT_CACHE xPTEL2_C_BIT
#define _PAGE_BIT_PRESENT xPTEL2_PV_BIT
#define _PAGE_BIT_DIRTY xPTEL2_D_BIT
#define _PAGE_BIT_GLOBAL xPTEL2_G_BIT
#define _PAGE_BIT_ACCESSED xPTEL2_UNUSED1_BIT /* mustn't be loaded into IPTEL2/DPTEL2 */

#define _PAGE_VALID xPTEL2_V
#define _PAGE_CACHE xPTEL2_C
#define _PAGE_PRESENT xPTEL2_PV
#define _PAGE_DIRTY xPTEL2_D
#define _PAGE_PROT xPTEL2_PR
#define _PAGE_PROT_RKNU xPTEL2_PR_ROK
#define _PAGE_PROT_WKNU xPTEL2_PR_RWK
#define _PAGE_PROT_RKRU xPTEL2_PR_ROK_ROU
#define _PAGE_PROT_WKRU xPTEL2_PR_RWK_ROU
#define _PAGE_PROT_WKWU xPTEL2_PR_RWK_RWU
#define _PAGE_GLOBAL xPTEL2_G
#define _PAGE_PS_MASK xPTEL2_PS
#define _PAGE_PS_4Kb xPTEL2_PS_4Kb
#define _PAGE_PS_128Kb xPTEL2_PS_128Kb
#define _PAGE_PS_1Kb xPTEL2_PS_1Kb
#define _PAGE_PS_4Mb xPTEL2_PS_4Mb
#define _PAGE_PSE xPTEL2_PS_4Mb /* 4MB page */
#define _PAGE_CACHE_WT xPTEL2_CWT
#define _PAGE_ACCESSED xPTEL2_UNUSED1
#define _PAGE_NX 0 /* no-execute bit */

/* If _PAGE_VALID is clear, we use these: */
#define _PAGE_FILE xPTEL2_C /* set:pagecache unset:swap */
#define _PAGE_PROTNONE 0x000 /* If not present */

#define __PAGE_PROT_UWAUX 0x010
#define __PAGE_PROT_USER 0x020
#define __PAGE_PROT_WRITE 0x040

#define _PAGE_PRESENTV (_PAGE_PRESENT|_PAGE_VALID)

#ifndef __ASSEMBLY__

#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define _PAGE_TABLE (_PAGE_PRESENTV | _PAGE_PROT_WKNU | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PAGE_NONE (_PAGE_PRESENTV | _PAGE_PROT_RKNU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_SHARED (_PAGE_PRESENTV | _PAGE_PROT_WKWU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_COPY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_READONLY (_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)

#define PAGE_NONE __pgprot(__PAGE_NONE | _PAGE_NX)
#define PAGE_SHARED_NOEXEC __pgprot(__PAGE_SHARED | _PAGE_NX)
#define PAGE_COPY_NOEXEC __pgprot(__PAGE_COPY | _PAGE_NX)
#define PAGE_READONLY_NOEXEC __pgprot(__PAGE_READONLY | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(__PAGE_SHARED)
#define PAGE_COPY_EXEC __pgprot(__PAGE_COPY)
#define PAGE_READONLY_EXEC __pgprot(__PAGE_READONLY)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_READONLY PAGE_READONLY_NOEXEC
#define PAGE_SHARED PAGE_SHARED_EXEC

#define __PAGE_KERNEL_BASE (_PAGE_PRESENTV | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)

#define __PAGE_KERNEL (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_NX)
#define __PAGE_KERNEL_EXEC (__PAGE_KERNEL & ~_PAGE_NX)
#define __PAGE_KERNEL_RO (__PAGE_KERNEL_BASE | _PAGE_PROT_RKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)

#define __PAGE_USERIO (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
#define PAGE_USERIO __pgprot(__PAGE_USERIO)

/*
 * Whilst the MN10300 can do page protection for execute (given separate data
 * and insn TLBs), we are not supporting it at the moment. Write permission,
 * however, always implies read permission (but not execute permission).
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_NOEXEC
#define __P010 PAGE_COPY_NOEXEC
#define __P011 PAGE_COPY_NOEXEC
#define __P100 PAGE_READONLY_EXEC
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_NOEXEC
#define __S010 PAGE_SHARED_NOEXEC
#define __S011 PAGE_SHARED_NOEXEC
#define __S100 PAGE_READONLY_EXEC
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC

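/*
 * Worked example (editor's addition): the __Pxwr/__Sxwr digits are the
 * mmap() protection bits in execute-write-read order, private (__P) or
 * shared (__S). A private PROT_READ|PROT_WRITE mapping therefore takes
 * __P011 == PAGE_COPY_NOEXEC, so the first write faults and is satisfied
 * by copy-on-write, while the shared equivalent takes
 * __S011 == PAGE_SHARED_NOEXEC and writes land in the shared page.
 */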
/*
 * Define this to warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

#define pte_present(x) (pte_val(x) & _PAGE_VALID)
#define pte_clear(mm, addr, xp) \
do { \
	set_pte_at((mm), (addr), (xp), __pte(0)); \
} while (0)

#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (!pmd_none(x))
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x) 0


#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

#ifndef __ASSEMBLY__

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_user(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_read(pte_t pte) { return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
static inline int pte_special(pte_t pte) { return 0; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX);
	return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_NX;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkexec(pte_t pte) { pte_val(pte) &= ~_PAGE_NX; return pte; }

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_USER;
	if (pte_write(pte))
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_WRITE;
	if (pte_val(pte) & __PAGE_PROT_USER)
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

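/*
 * Illustrative sketch (editor's addition): the pte_mk*() helpers compose,
 * and pte_mkread()/pte_mkwrite() keep the auxiliary UWAUX bit consistent
 * with the USER and WRITE bits. The helper name is hypothetical.
 */
static inline pte_t example_make_user_rw(pte_t pte)
{
	pte = pte_mkread(pte);	/* set USER (and UWAUX if already writable) */
	pte = pte_mkwrite(pte);	/* set WRITE; USER is set, so UWAUX follows */
	return pte_mkyoung(pte_mkdirty(pte));
}
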
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_clear(xp) do { } while (0)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval) set_pte((ptep), (pteval))
#define set_pte_atomic(pteptr, pteval) set_pte((pteptr), (pteval))

/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define ptep_get_and_clear(mm, addr, ptep) \
	__pte(xchg(&(ptep)->pte, 0))
#define pte_same(a, b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!pte_val(x))
#define pte_pfn(x) ((unsigned long) (pte_val(x) >> PAGE_SHIFT))
#define __pfn_addr(pfn) ((pfn) << PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(__pfn_addr(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(__pfn_addr(pfn) | pgprot_val(prot))

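/*
 * Illustrative sketch (editor's addition): pfn_pte() and pte_pfn() are
 * inverses because the pfn occupies the bits above PAGE_SHIFT while the
 * protection bits sit below it. The helper name is hypothetical.
 */
static inline int example_pfn_round_trip(unsigned long pfn)
{
	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);

	return pte_pfn(pte) == pfn;	/* holds while prot bits stay below PAGE_SHIFT */
}
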
/*
 * All present user pages are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte);
}

/*
 * All present pages are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return 1;
}

#define PTE_FILE_MAX_BITS 30

#define pte_to_pgoff(pte) (pte_val(pte) >> 2)
#define pgoff_to_pte(off) __pte((off) << 2 | _PAGE_FILE)

/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x3f)
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) __pte((x).val)

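/*
 * Worked example (editor's addition): the swap type sits in bits 2-7 and
 * the offset in bits 8 and up, so __swp_entry(3, 0x1000) yields
 * (3 << 2) | (0x1000 << 8) == 0x10000C, from which __swp_type() recovers 3
 * and __swp_offset() recovers 0x1000; the low two bits of the encoding
 * stay clear.
 */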
static inline
int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

static inline
int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}

static inline
void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	set_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

/*
 * Macro to mark a page protection value as "uncacheable". On processors which
 * do not support it, this is a no-op.
 */
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)

/*
 * Macro to mark a page protection value as "Write-Through".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_through(prot) __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)

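/*
 * Illustrative sketch (editor's addition): a driver mapping device
 * registers would typically strip the cache-enable bit from an otherwise
 * normal kernel protection. The helper name is hypothetical.
 */
static inline pgprot_t example_device_prot(void)
{
	return pgprot_noncached(PAGE_KERNEL);	/* __PAGE_KERNEL minus _PAGE_CACHE */
}
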
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) \
	((entry).pte |= _PAGE_PRESENT | _PAGE_PSE | _PAGE_VALID)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

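/*
 * Illustrative sketch (editor's addition): mprotect()-style code retargets
 * a pte's protections with pte_modify(); the _PAGE_CHG_MASK filter keeps
 * the page frame number plus the ACCESSED and DIRTY state. The helper name
 * is hypothetical.
 */
static inline pte_t example_downgrade_to_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);	/* pfn and A/D bits survive */
}
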
#define page_pte(page) page_pte_prot((page), __pgprot(0))

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
	 (_PAGE_PSE | _PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))

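/*
 * Illustrative sketch (editor's addition): with the pmd folded into the
 * pgd, looking up a kernel pte is effectively a two-step walk. This is a
 * simplified, hypothetical helper; generic code reaches the same pte via
 * the pud/pmd folding layers.
 */
static inline pte_t *example_lookup_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* index init_mm's pgd */
	pmd_t *pmd = (pmd_t *) pgd;		/* folded: the pmd is the pgd entry */

	if (pmd_none(*pmd))
		return NULL;			/* no page table mapped here */
	return pte_offset_kernel(pmd, address);
}
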
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
static inline int set_kernel_exec(unsigned long vaddr, int enable)
{
	return 0;	/* no per-page execute control here; nothing to restore */
}

#define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte) do {} while (0)

/*
 * The MN10300 has external MMU info in the form of a TLB: this is adapted from
 * the kernel page tables containing the necessary information by tlb-mn10300.S
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t *ptep);

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))

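/*
 * Illustrative sketch (editor's addition): a character driver's mmap()
 * method forwards to io_remap_pfn_range(), usually with caching disabled
 * for device memory. "example_phys" is a hypothetical device address.
 */
static inline int example_mmap_device(struct vm_area_struct *vma,
				      unsigned long example_phys)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start,
				  example_phys >> PAGE_SHIFT,
				  size, vma->vm_page_prot);
}
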
#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */