Linux Kernel  3.7.1
pgtable.h
/*
 * include/asm-xtensa/pgtable.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PGTABLE_H
#define _XTENSA_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>

/*
 * We only use two ring levels, user and kernel space.
 */

#define USER_RING 1 /* user ring level */
#define KERNEL_RING 0 /* kernel ring level */

/*
 * The Xtensa architecture port of Linux has a two-level page table system,
 * i.e. the logical three-level Linux page table layout is folded.
 * Each task has the following memory page tables:
 *
 * PGD table (page directory), i.e. 3rd-level page table:
 *      One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
 *      (Architectures that don't have the PMD folded point to the PMD tables)
 *
 *      The pointer to the PGD table for a given task can be retrieved from
 *      the task structure (struct task_struct*) t, e.g. current():
 *        (t->mm ? t->mm : t->active_mm)->pgd
 *
 * PMD tables (page middle-directory), i.e. 2nd-level page tables:
 *      Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
 *
 * PTE tables (page table entry), i.e. 1st-level page tables:
 *      One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs, with a special PTE,
 *      invalid_pte_table, for absent mappings.
 *
 * The individual pages are 4 kB in size, with a special page for the
 * empty_zero_page.
 */

#define PGDIR_SHIFT 22
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use a two-level layout, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE 1024
#define PTRS_PER_PTE_SHIFT 10
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
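
/*
 * Illustration (following directly from the constants above and the 4 kB
 * page size, PAGE_SHIFT == 12 from <asm/page.h>): a 32-bit virtual address
 * decomposes as
 *
 *      31          22 21          12 11            0
 *      +-------------+--------------+--------------+
 *      |  pgd index  |  pte index   | page offset  |
 *      +-------------+--------------+--------------+
 *
 * so each PGD entry covers PGDIR_SIZE = 4 MB, and the 1024 PGD entries
 * together span 1024 * 4 MB = 4 GB of virtual address space.
 */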

/*
 * Virtual memory area. We keep some distance to other memory regions to be
 * on the safe side. We also use this area for cache aliasing.
 */

#define VMALLOC_START 0xC0000000
#define VMALLOC_END 0xC7FEFFFF
#define TLBTEMP_BASE_1 0xC7FF0000
#define TLBTEMP_BASE_2 0xC7FF8000
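
/*
 * For reference, arithmetic on the constants above:
 * VMALLOC_END - VMALLOC_START + 1 = 0x07FF0000 bytes, i.e. 128 MB minus
 * 64 kB. The final 64 kB below 0xC8000000 holds the TLBTEMP areas
 * (TLBTEMP_BASE_1 and TLBTEMP_BASE_2, 32 kB apart), which the comment above
 * associates with handling cache aliasing.
 */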

/*
 * Xtensa Linux config PTE layout (when present):
 *      31-12: PPN
 *      11-6:  Software
 *      5-4:   RING
 *      3-0:   CA
 *
 * Similar to the Alpha and MIPS ports, we need to keep track of the ref
 * and mod bits in software. We have a software "you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page.
 *
 * See further below for the PTE layout of swapped-out pages.
 */

#define _PAGE_HW_EXEC (1<<0) /* hardware: page is executable */
#define _PAGE_HW_WRITE (1<<1) /* hardware: page is writable */

#define _PAGE_FILE (1<<1) /* non-linear mapping, if !present */
#define _PAGE_PROTNONE (3<<0) /* special case for VM_PROT_NONE */

/* None of these cache modes include MP coherency: */
#define _PAGE_CA_BYPASS (0<<2) /* bypass, non-speculative */
#define _PAGE_CA_WB (1<<2) /* write-back */
#define _PAGE_CA_WT (2<<2) /* write-through */
#define _PAGE_CA_MASK (3<<2)
#define _PAGE_INVALID (3<<2)

#define _PAGE_USER (1<<4) /* user access (ring=1) */

/* Software */
#define _PAGE_WRITABLE_BIT 6
#define _PAGE_WRITABLE (1<<6) /* software: page writable */
#define _PAGE_DIRTY (1<<7) /* software: page dirty */
#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */

/* On older HW revisions, we always have to set bit 0 */
#if XCHAL_HW_VERSION_MAJOR < 2000
# define _PAGE_VALID (1<<0)
#else
# define _PAGE_VALID 0
#endif

#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_PRESENT (_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
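
/*
 * Worked example (assuming a hardware revision where _PAGE_VALID == 0):
 * a present user page carries at least
 *
 *      _PAGE_PRESENT | _PAGE_USER
 *        = _PAGE_CA_WB | _PAGE_ACCESSED | _PAGE_USER
 *        = (1<<2) | (1<<8) | (1<<4) = 0x114
 *
 * Note that _PAGE_HW_WRITE is not part of _PAGE_PRESENT: even a writable
 * mapping starts out without the hardware write bit, so the first store
 * faults and software can record the modification in the software
 * _PAGE_DIRTY bit before hardware write access is granted (the pte_mkdirty()
 * and pte_mkwrite() helpers below only set the software bits).
 */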

#ifdef CONFIG_MMU

#define PAGE_NONE __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE | _PAGE_HW_EXEC)

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED)
#else
# define _PAGE_DIRECTORY (_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
#endif

#else /* no mmu */

# define PAGE_NONE __pgprot(0)
# define PAGE_SHARED __pgprot(0)
# define PAGE_COPY __pgprot(0)
# define PAGE_READONLY __pgprot(0)
# define PAGE_KERNEL __pgprot(0)

#endif

/*
 * On certain configurations of the Xtensa MMU (e.g. the initial Linux config),
 * the MMU can't do page protection for execute, and considers it the same as
 * read. Also, write permissions may imply read permissions.
 * What follows is the closest we can get by reasonable means.
 * See linux/mm/mmap.c for the protection_map[] array that uses these
 * definitions.
 */
#define __P000 PAGE_NONE           /* private --- */
#define __P001 PAGE_READONLY       /* private --r */
#define __P010 PAGE_COPY           /* private -w- */
#define __P011 PAGE_COPY           /* private -wr */
#define __P100 PAGE_READONLY_EXEC  /* private x-- */
#define __P101 PAGE_READONLY_EXEC  /* private x-r */
#define __P110 PAGE_COPY_EXEC      /* private xw- */
#define __P111 PAGE_COPY_EXEC      /* private xwr */

#define __S000 PAGE_NONE           /* shared  --- */
#define __S001 PAGE_READONLY       /* shared  --r */
#define __S010 PAGE_SHARED         /* shared  -w- */
#define __S011 PAGE_SHARED         /* shared  -wr */
#define __S100 PAGE_READONLY_EXEC  /* shared  x-- */
#define __S101 PAGE_READONLY_EXEC  /* shared  x-r */
#define __S110 PAGE_SHARED_EXEC    /* shared  xw- */
#define __S111 PAGE_SHARED_EXEC    /* shared  xwr */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern unsigned long empty_zero_page[1024];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#ifdef CONFIG_MMU
extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
extern void paging_init(void);
extern void pgtable_cache_init(void);
#else
# define swapper_pg_dir NULL
static inline void paging_init(void) { }
static inline void pgtable_cache_init(void) { }
#endif

/*
 * The pmd contains the kernel virtual address of the pte page.
 */
#define pmd_page_vaddr(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page(pmd_val(pmd))

/*
 * pte status.
 */
#define pte_none(pte) (pte_val(pte) == _PAGE_INVALID)
#define pte_present(pte) \
        (((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID) \
         || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep) \
        do { update_pte(ptep, __pte(_PAGE_INVALID)); } while (0)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)

static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return 0; }

static inline pte_t pte_wrprotect(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)
        { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
        { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
        { pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
        { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
        { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)
        { return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pte_same(a,b) (pte_val(a) == pte_val(b))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
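
/*
 * For illustration: pte_modify() keeps the bits covered by _PAGE_CHG_MASK
 * (the PFN plus the software ACCESSED and DIRTY bits) and replaces all
 * protection bits, e.g.
 *
 *      pte = pte_modify(pte, PAGE_READONLY);
 *
 * yields a PTE for the same physical page, still marked accessed/dirty,
 * but without _PAGE_WRITABLE or _PAGE_HW_WRITE.
 */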

/*
 * Certain architectures need to do special things when PTEs within a page
 * table are directly modified. Thus, the following hook is made available.
 */
static inline void update_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
        __asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
#endif
}
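
/*
 * Note on the dhwb above (informational): when the data cache is write-back
 * and a cache way is larger than a page, _PAGE_DIRECTORY (defined earlier)
 * omits _PAGE_CA_WB, which suggests page tables are then accessed with the
 * cache bypassed during TLB refill. The dhwb ("data cache hit writeback")
 * instruction writes the freshly modified PTE's cache line back to memory so
 * that such an access observes the new value.
 */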

struct mm_struct;

static inline void
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
{
        update_pte(ptep, pteval);
}

static inline void
set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        *pmdp = pmdval;
}

struct vm_area_struct;

static inline int
ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
                          pte_t *ptep)
{
        pte_t pte = *ptep;
        if (!pte_young(pte))
                return 0;
        update_pte(ptep, pte_mkold(pte));
        return 1;
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        pte_clear(mm, addr, ptep);
        return pte;
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        update_pte(ptep, pte_wrprotect(pte));
}

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir,address) ((pmd_t*)(dir))

/* Find an entry in the third-level page table. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) \
        ((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
#define pte_unmap(pte) do { } while (0)
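
/*
 * Putting the lookup macros together, a sketched kernel-side walk to the PTE
 * that maps a virtual address looks roughly like:
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);          <- &mm->pgd[addr >> 22]
 *      pmd_t *pmd = pmd_offset(pgd, addr);         <- same slot; PMD is folded
 *      pte_t *pte = pte_offset_kernel(pmd, addr);  <- PTE page + bits 21..12
 */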

/*
 * Encode and decode a swap entry.
 *
 * Format of swap pte:
 *      bit        0    MBZ
 *      bit        1    page-file (must be zero)
 *      bits  2 -  3    page hw access mode (must be 11: _PAGE_INVALID)
 *      bits  4 -  5    ring protection (must be 01: _PAGE_USER)
 *      bits  6 - 10    swap type (5 bits -> 32 types)
 *      bits 11 - 31    swap offset / PAGE_SIZE (21 bits -> 8 GB)
 *
 * Format of file pte:
 *      bit        0    MBZ
 *      bit        1    page-file (must be one: _PAGE_FILE)
 *      bits  2 -  3    page hw access mode (must be 11: _PAGE_INVALID)
 *      bits  4 - 31    file offset / PAGE_SIZE (28 bits, see PTE_FILE_MAX_BITS)
 */

#define __swp_type(entry) (((entry).val >> 6) & 0x1f)
#define __swp_offset(entry) ((entry).val >> 11)
#define __swp_entry(type,offs) \
        ((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS 28
#define pte_to_pgoff(pte) (pte_val(pte) >> 4)
#define pgoff_to_pte(off) \
        ((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
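
/*
 * Worked example: __swp_entry(3, 0x1234) encodes swap type 3 at page offset
 * 0x1234 as
 *
 *      (3 << 6) | (0x1234 << 11) | _PAGE_INVALID
 *        = 0xC0 | 0x91A000 | 0xC = 0x91A0CC
 *
 * __swp_type() and __swp_offset() recover 3 and 0x1234 from that value, and
 * the _PAGE_INVALID cache-attribute bits keep pte_present() false for it.
 */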

#endif /* !defined (__ASSEMBLY__) */


#ifdef __ASSEMBLY__

/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
 * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
 * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long),
 * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long).
 *
 * Note: We require an additional temporary register, which can be the same
 * as the register that holds the address.
 *
 * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
 */
#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT

#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
                                _PGD_INDEX(tmp, adr); \
                                addx4 mm, tmp, mm

#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
                                 srli pmd, pmd, PAGE_SHIFT; \
                                 slli pmd, pmd, PAGE_SHIFT; \
                                 addx4 pmd, tmp, pmd
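
/* Reading the _PTE_OFFSET sequence step by step (informational):
 *      _PTE_INDEX  extracts PTRS_PER_PTE_SHIFT bits starting at PAGE_SHIFT,
 *                  i.e. the C pte_index(addr);
 *      srli/slli   clear the low PAGE_SHIFT bits of the pmd value, i.e.
 *                  pmd_val(pmd) & PAGE_MASK (the address of the PTE page);
 *      addx4       adds index * 4, the byte offset of a 4-byte PTE.
 * _PGD_OFFSET is analogous: it loads mm->pgd (at offset MM_PGD within the
 * mm_struct) and indexes it by _PGD_INDEX(addr) * 4.
 */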

#else

#define kern_addr_valid(addr) (1)

extern void update_mmu_cache(struct vm_area_struct *vma,
                             unsigned long address, pte_t *ptep);

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */

#define io_remap_pfn_range(vma,from,pfn,size,prot) \
        remap_pfn_range(vma, from, pfn, size, prot)

typedef pte_t *pte_addr_t;

#endif /* __ASSEMBLY__ */

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME

#include <asm-generic/pgtable.h>

#endif /* _XTENSA_PGTABLE_H */