Linux Kernel 3.7.1
arch/alpha/include/asm/pgtable.h
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

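/*
 * Worked example (assuming the current 8 KB page size, i.e. PAGE_SHIFT = 13):
 *
 *	PTRS_PER_PTE/PMD/PGD = 1 << 10 = 1024 entries per one-page table
 *	PMD_SHIFT   = 13 + 10 = 23   -> each PMD entry maps 8 MB
 *	PGDIR_SHIFT = 13 + 20 = 33   -> each PGD entry maps 8 GB
 *	so one PGD page can reach 1024 * 8 GB = 8 TB of virtual space
 *	(the user portion is limited to TASK_SIZE via USER_PTRS_PER_PGD).
 */
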
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START	0xfffffe0000000000
#else
#define VMALLOC_START	(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END	(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000
#define _PAGE_FILE	0x80000	/* set:pagecache, unset:swap */

/*
 * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

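/*
 * A PTE is therefore laid out with the page frame number in the upper 32
 * bits (see _PFN_MASK and pte_pfn() below) and all protection and software
 * bits in the lower 32 bits.  For example, mk_pte(page, PAGE_SHARED)
 * (defined below) produces the value
 *
 *	(page_to_pfn(page) << 32) | _PAGE_VALID | __ACCESS_BITS
 */
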
/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)

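/*
 * For example, __S010 (shared "-w-") expands to
 * _PAGE_VALID | __ACCESS_BITS | _PAGE_FOE, i.e. readable and writable but
 * not executable: the "-w- -> rw-" case noted above.  The private __P010
 * additionally gets _PAGE_FOW forced on by _PAGE_P(), so a private writable
 * page starts out write-protected and only becomes writable through the
 * copy-on-write fault path.
 */
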
/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2	3

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)	(pte_val(pte) >> 32)
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})
#endif

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

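/*
 * In both helpers above, ((unsigned long) p - PAGE_OFFSET) is the physical
 * address of the lower-level table, and because page tables are page-aligned,
 * shifting that by (32 - PAGE_SHIFT) is the same as storing its page frame
 * number in the upper 32 bits: (phys >> PAGE_SHIFT) << 32.  The
 * pmd_page_vaddr()/pgd_page_vaddr() helpers below simply undo this.
 */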

extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_special(pte_t pte)	{ return 0; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

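/*
 * These helpers operate on a pte value rather than on the page table itself,
 * so a typical (illustrative) use composes them and writes the result back,
 * e.g. clearing the software dirty and accessed state of an entry:
 *
 *	set_pte_at(mm, addr, ptep, pte_mkold(pte_mkclean(*ptep)));
 */
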
#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/*
 * The smp_read_barrier_depends() in the following functions are required to
 * order the load of *dir (the pointer in the top level page table) with any
 * subsequent load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)

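/*
 * Putting the pieces together, a full software walk of the three-level tree
 * for a mapped address looks like this (a sketch only: real callers must
 * hold the appropriate page table lock and check pgd_none()/pgd_bad() and
 * pmd_none()/pmd_bad() before descending each level):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *	if (pte_present(*pte))
 *		pfn = pte_pfn(*pte);
 */
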
extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

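/*
 * A swap entry therefore packs the swap type into bits 39..32 and the swap
 * offset into bits 63..40, leaving the low 32 bits (including _PAGE_VALID)
 * zero.  For example, __swp_entry(5, 0x1234) yields the pte value
 * (5UL << 32) | (0x1234UL << 40), from which __swp_type() recovers 5 and
 * __swp_offset() recovers 0x1234.
 */
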
#define pte_to_pgoff(pte)	(pte_val(pte) >> 32)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 32) | _PAGE_FILE })

#define PTE_FILE_MAX_BITS	32

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define io_remap_pfn_range(vma, start, pfn, size, prot)	\
		remap_pfn_range(vma, start, pfn, size, prot)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */