Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
arch
m68k
include
asm
mcf_pgtable.h
Go to the documentation of this file.
1
#ifndef _MCF_PGTABLE_H
2
#define _MCF_PGTABLE_H
3
4
#include <
asm/mcfmmu.h
>
5
#include <asm/page.h>
6
7
/*
 * MMUDR bits, in proper place. We write these directly into the MMUDR
 * after masking from the pte.
 */
#define CF_PAGE_LOCKED		MMUDR_LK	/* 0x00000002 */
#define CF_PAGE_EXEC		MMUDR_X		/* 0x00000004 */
#define CF_PAGE_WRITABLE	MMUDR_W		/* 0x00000008 */
#define CF_PAGE_READABLE	MMUDR_R		/* 0x00000010 */
#define CF_PAGE_SYSTEM		MMUDR_SP	/* 0x00000020 */
#define CF_PAGE_COPYBACK	MMUDR_CM_CCB	/* 0x00000040 */
#define CF_PAGE_NOCACHE		MMUDR_CM_NCP	/* 0x00000080 */

#define CF_CACHEMASK		(~MMUDR_CM_CCB)
/* Mask of all pte bits that are written through to the MMUDR (0x02..0x80). */
#define CF_PAGE_MMUDR_MASK	0x000000fe

/* m68k-generic name mapped onto the ColdFire no-cache bit. */
#define _PAGE_NOCACHE030	CF_PAGE_NOCACHE
24
/*
 * MMUTR bits, need shifting down.
 */
#define CF_PAGE_MMUTR_MASK	0x00000c00
#define CF_PAGE_MMUTR_SHIFT	10

#define CF_PAGE_VALID		(MMUTR_V << CF_PAGE_MMUTR_SHIFT)
#define CF_PAGE_SHARED		(MMUTR_SG << CF_PAGE_MMUTR_SHIFT)

/*
 * Fake bits, not implemented in CF, will get masked out before
 * hitting hardware.
 */
#define CF_PAGE_DIRTY		0x00000001
#define CF_PAGE_FILE		0x00000200
#define CF_PAGE_ACCESSED	0x00001000

/* 68040-compatibility cache-mode encodings expected by common m68k code. */
#define _PAGE_CACHE040		0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_NOCACHE_S		0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_NOCACHE		0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_CACHE040W		0x000	/* 68040 cache mode, cachable, write-through */
#define _DESCTYPE_MASK		0x003
#define _CACHEMASK040		(~0x060)
#define _PAGE_GLOBAL040		0x400	/* 68040 global bit, used for kva descs */
/* 68040 global bit, used for kva descs */
48
49
/*
50
* Externally used page protection values.
51
*/
52
#define _PAGE_PRESENT (CF_PAGE_VALID)
53
#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
54
#define _PAGE_DIRTY (CF_PAGE_DIRTY)
55
#define _PAGE_READWRITE (CF_PAGE_READABLE \
56
| CF_PAGE_WRITABLE \
57
| CF_PAGE_SYSTEM \
58
| CF_PAGE_SHARED)
59
60
/*
 * Compound page protection values.
 */
#define PAGE_NONE	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_SHARED)

#define PAGE_INIT	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM)

#define PAGE_KERNEL	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_WRITABLE \
				 | CF_PAGE_EXEC \
				 | CF_PAGE_SYSTEM \
				 | CF_PAGE_SHARED)

/* Copy-on-write mapping: readable but not writable (faults on write). */
#define PAGE_COPY	__pgprot(CF_PAGE_VALID \
				 | CF_PAGE_ACCESSED \
				 | CF_PAGE_READABLE \
				 | CF_PAGE_DIRTY)
89
/*
 * Page protections for initialising protection_map. See mm/mmap.c
 * for use. In general, the bit positions are xwr, and P-items are
 * private, the S-items are shared.
 */
#define __P000	PAGE_NONE
#define __P001	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE)
#define __P010	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_WRITABLE)
#define __P011	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE \
			 | CF_PAGE_WRITABLE)
#define __P100	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_EXEC)
#define __P101	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE \
			 | CF_PAGE_EXEC)
#define __P110	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_WRITABLE \
			 | CF_PAGE_EXEC)
#define __P111	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE \
			 | CF_PAGE_WRITABLE \
			 | CF_PAGE_EXEC)

#define __S000	PAGE_NONE
#define __S001	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE)
#define __S010	PAGE_SHARED
#define __S011	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_SHARED \
			 | CF_PAGE_READABLE)
#define __S100	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_EXEC)
#define __S101	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_READABLE \
			 | CF_PAGE_EXEC)
#define __S110	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_SHARED \
			 | CF_PAGE_EXEC)
#define __S111	__pgprot(CF_PAGE_VALID \
			 | CF_PAGE_ACCESSED \
			 | CF_PAGE_SHARED \
			 | CF_PAGE_READABLE \
			 | CF_PAGE_EXEC)

#define PTE_MASK	PAGE_MASK
/* Bits preserved by pte_modify(): the pfn plus accessed/dirty state. */
#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
150
151
#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
158
159
/*
 * Replace the protection bits of @pte with @newprot, keeping the page
 * frame and the accessed/dirty state (everything in CF_PAGE_CHG_MASK).
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long preserved = pte_val(pte) & CF_PAGE_CHG_MASK;

	pte_val(pte) = preserved | pgprot_val(newprot);
	return pte;
}
164
165
/* Nothing to install: the pmd level is folded on ColdFire, so no-op. */
#define pmd_set(pmdp, ptep) do {} while (0)

/* Point a pgd slot at a pmd table, stored as a physical address. */
static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = virt_to_phys(pmdp);
}

/* Extract the (virtual) page address / table address held in an entry. */
#define __pte_page(pte)	((unsigned long) (pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd)	((unsigned long) (pmd_val(pmd)))
174
175
/* True when the pte holds nothing at all (no mapping, swap or file entry). */
static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

/* True when the pte maps a resident page (hardware valid bit set). */
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_VALID;
}

/* Wipe a pte; @mm and @addr are unused here but required by the generic API. */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
	pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

#define pte_pagenr(pte)	((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
#define pte_page(pte)	virt_to_page(__pte_page(pte))
193
194
/* pmd helpers; a pmd is never "bad" since it is just a folded pgd entry. */
static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
#define pmd_none(pmd) pmd_none2(&(pmd))
static inline int pmd_bad2(pmd_t *pmd) { return 0; }
#define pmd_bad(pmd) pmd_bad2(&(pmd))
#define pmd_present(pmd) (!pmd_none2(&(pmd)))
static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
200
201
/* The top level is always considered present and valid; clearing is a no-op. */
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }
static inline int pgd_present(pgd_t pgd) { return 1; }
static inline void pgd_clear(pgd_t *pgdp) {}
205
206
/* Diagnostics printed when a corrupt table entry is detected. */
#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
	       __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))
215
216
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not...
 * [we have the full set here even if they don't change from m68k]
 */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_READABLE;
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_WRITABLE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_EXEC;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_ACCESSED;
}

/* True for a non-linear file mapping entry (software-only CF_PAGE_FILE bit). */
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & CF_PAGE_FILE;
}

/* Special ptes are not supported on this architecture. */
static inline int pte_special(pte_t pte)
{
	return 0;
}
255
256
/*
 * Pte modifiers. Each takes the pte by value and returns an updated
 * copy; the caller is responsible for storing it back.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_WRITABLE;
	return pte;
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_READABLE;
	return pte;
}

static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_EXEC;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_ACCESSED;
	return pte;
}
315
316
/*
 * Mark the pte non-cacheable.
 *
 * The original statement was
 *	pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
 * OR-ing the pte's own (masked) value back into itself under |= is a
 * no-op, so the whole expression reduces to setting bit 0x80 — which is
 * CF_PAGE_NOCACHE (MMUDR_CM_NCP).  Use the named constant; behavior is
 * unchanged.
 *
 * NOTE(review): if the intent of the old "~0x40" was to also clear
 * CF_PAGE_COPYBACK, that never actually happened — confirm against the
 * ColdFire MMUDR cache-mode encoding before changing the semantics.
 */
static inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) |= CF_PAGE_NOCACHE;
	return pte;
}
321
322
/* Mark the pte cacheable again by clearing the no-cache bit. */
static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) &= ~CF_PAGE_NOCACHE;
	return pte;
}

/* Special ptes are unsupported here, so this is an identity operation. */
static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
332
333
/* The kernel's page directory doubles as the swapper's. */
#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[PTRS_PER_PGD];

/*
 * Find an entry in a pagetable directory.
 */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/*
 * Find an entry in a kernel pagetable directory.
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
346
347
/*
 * Find an entry in the second-level pagetable.
 *
 * The pmd level is folded into the pgd on this architecture, so the
 * pgd entry itself is reinterpreted as the pmd; @address is unused.
 */
static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}
354
355
/*
 * Find an entry in the third-level pagetable.
 */
#define __pte_offset(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
361
362
/*
363
* Disable caching for page at given kernel virtual address.
364
*/
365
static
inline
void
nocache_page(
void
*
vaddr
)
366
{
367
pgd_t
*
dir
;
368
pmd_t
*pmdp;
369
pte_t
*ptep;
370
unsigned
long
addr
= (
unsigned
long
) vaddr;
371
372
dir =
pgd_offset_k
(addr);
373
pmdp =
pmd_offset
(dir, addr);
374
ptep =
pte_offset_kernel
(pmdp, addr);
375
*ptep = pte_mknocache(*ptep);
376
}
377
378
/*
379
* Enable caching for page at given kernel virtual address.
380
*/
381
static
inline
void
cache_page(
void
*
vaddr
)
382
{
383
pgd_t
*
dir
;
384
pmd_t
*pmdp;
385
pte_t
*ptep;
386
unsigned
long
addr
= (
unsigned
long
) vaddr;
387
388
dir =
pgd_offset_k
(addr);
389
pmdp =
pmd_offset
(dir, addr);
390
ptep =
pte_offset_kernel
(pmdp, addr);
391
*ptep = pte_mkcache(*ptep);
392
}
393
394
/* Non-linear file mapping: the file offset lives above bit PTE_FILE_SHIFT. */
#define PTE_FILE_MAX_BITS	21
#define PTE_FILE_SHIFT		11

/* Recover the page offset stored in a file pte. */
static inline unsigned long pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> PTE_FILE_SHIFT;
}

/* Build a file pte from a page offset, tagging it with CF_PAGE_FILE. */
static inline pte_t pgoff_to_pte(unsigned pgoff)
{
	return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
}
406
407
/*
 * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
 * Layout: swap type in the low byte, offset above PTE_FILE_SHIFT.
 */
#define __swp_type(x)		((x).val & 0xFF)
#define __swp_offset(x)		((x).val >> PTE_FILE_SHIFT)
#define __swp_entry(typ, off)	((swp_entry_t) { (typ) | \
					(off << PTE_FILE_SHIFT) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	(__pte((x).val))

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

/* No highmem here, so mapping a pte is just pointer arithmetic. */
#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
				__pte_offset(addr))
#define pte_unmap(pte)		((void) 0)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#endif
/* !__ASSEMBLY__ */
426
#endif
/* _MCF_PGTABLE_H */
Generated on Thu Jan 10 2013 13:08:06 for Linux Kernel by
1.8.2