Linux Kernel 3.7.1
arch/parisc/include/asm/pgalloc.h
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd. This means that we can
 * subtract a constant offset to get to it. The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
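/*
 * Worked numbers, derived from the comment above rather than from the
 * size macros themselves: if each pmd spans 4GB, then 8TB of process
 * address space implies 8TB / 4GB = 2048 pgd entries, and any address
 * below 4GB resolves entirely within the pmd allocated next to the
 * pgd, i.e. with one fewer level of table walk.
 */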
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory. We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					    PxD_FLAG_VALID |
					    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry also is marked with _PAGE_GATEWAY as
		 * a signal that this pmd may not be freed */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}
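
/* pgd_alloc() above hands back a pointer PTRS_PER_PGD entries past the
 * start of the allocation on 64-bit kernels; step back to the true
 * base of the allocation before returning the pages. */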
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if PT_NLEVELS == 3

/* Three Level Page Table Support for pmd's */
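
/* Point a pgd entry at a freshly allocated pmd: store the pmd's
 * physical address, shifted into entry format, together with the
 * present and valid flags. */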
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
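
/* Allocate and zero one pmd's worth of pages. __GFP_REPEAT asks the
 * page allocator to retry harder before giving up. */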
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT,
					       PMD_ORDER);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd;
		 * cannot free it */
		return;
#endif
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
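
/* With the pmd folded into the pgd like this, generic mm code should
 * never ask for a separate pmd, so pmd_alloc_one() BUGs; the
 * (pmd_t *)2 return is just an unused poison value. */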

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef CONFIG_64BIT
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				     PxD_FLAG_VALID |
				     PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)
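
/* User pte pages travel as pgtable_t (a struct page *) and are run
 * through pgtable_page_ctor()/pgtable_page_dtor(), which handle
 * split-ptlock setup and page-table page accounting. */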

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	if (page)
		pgtable_page_ctor(page);
	return page;
}
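
/* Kernel page tables need none of that bookkeeping, so the kernel
 * variant returns a bare pte_t * from a zeroed page. */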

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}
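
/* No per-arch cache of preallocated page tables to trim on parisc,
 * so the periodic cache check is a no-op. */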
#define check_pgt_cache()	do { } while (0)

#endif