Linux Kernel 3.7.1
arch/tile/include/asm/page.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PAGE_H
#define _ASM_TILE_PAGE_H

#include <linux/const.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
#if defined(CONFIG_PAGE_SIZE_16KB)
#define PAGE_SHIFT 14
#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PAGE_SHIFT 16
#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
#else
#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
#define CTX_PAGE_FLAG 0
#endif
#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE

#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)

#define PAGE_MASK (~(PAGE_SIZE - 1))
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
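
/*
 * For example, with CONFIG_PAGE_SIZE_64KB (PAGE_SHIFT == 16):
 * PAGE_SIZE == 0x10000 and PAGE_MASK == ~0xffffUL, so
 * (addr & PAGE_MASK) rounds addr down to its page boundary and
 * (addr & ~PAGE_MASK) is the offset within the page.  _AC(1, UL)
 * keeps these constants usable from both C and assembly.
 */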

/*
 * If the Kconfig doesn't specify, set a maximum zone order that
 * is enough so that we can create huge pages from small pages given
 * the respective sizes of the two page types.  See <linux/mmzone.h>.
 */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define CONFIG_FORCE_MAX_ZONEORDER (HPAGE_SHIFT - PAGE_SHIFT + 1)
#endif
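
/*
 * E.g., assuming 64KB small pages (PAGE_SHIFT == 16) and 16MB huge
 * pages (HPAGE_SHIFT == 24), this yields a max zone order of 9, so the
 * buddy allocator can hand out the 2^8 contiguous small pages that
 * make up one huge page.
 */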

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/string.h>

struct page;

static inline void clear_page(void *page)
{
        memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
        memcpy(to, from, PAGE_SIZE);
}

static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}

/*
 * Hypervisor page tables are made of the same basic structure.
 */

typedef HV_PTE pte_t;
typedef HV_PTE pgd_t;
typedef HV_PTE pgprot_t;

/*
 * User L2 page tables are managed as one L2 page table per page,
 * because we use the page allocator for them.  This keeps the allocation
 * simple, but it's also inefficient, since L2 page tables are much smaller
 * than pages (currently 2KB vs 64KB).  So we should revisit this.
 */
typedef struct page *pgtable_t;

/* Must be a macro since it is used to create constants. */
#define __pgprot(val) hv_pte(val)

/* Rarely-used initializers, typically with a "zero" value. */
#define __pte(x) hv_pte(x)
#define __pgd(x) hv_pte(x)

static inline u64 pgprot_val(pgprot_t pgprot)
{
        return hv_pte_val(pgprot);
}

static inline u64 pte_val(pte_t pte)
{
        return hv_pte_val(pte);
}

static inline u64 pgd_val(pgd_t pgd)
{
        return hv_pte_val(pgd);
}

#ifdef __tilegx__

typedef HV_PTE pmd_t;

#define __pmd(x) hv_pte(x)

static inline u64 pmd_val(pmd_t pmd)
{
        return hv_pte_val(pmd);
}

#endif

static inline __attribute_const__ int get_order(unsigned long size)
{
        return BITS_PER_LONG - __builtin_clzl((size - 1) >> PAGE_SHIFT);
}
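
/*
 * get_order() returns the buddy-allocator order needed to hold "size"
 * bytes.  E.g., on tilegx (BITS_PER_LONG == 64) with 64KB pages,
 * get_order(0x20000) == 1.  Note that for size <= PAGE_SIZE the shifted
 * value is 0, so producing order 0 relies on clz of zero yielding the
 * full word width, as the tile hardware instruction does; plain C
 * leaves __builtin_clzl(0) undefined.
 */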

#endif /* !__ASSEMBLY__ */

#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

#define HUGE_MAX_HSTATE 6

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

/* Each memory controller has PAs distinct in their high bits. */
#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS())
#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
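
/*
 * E.g., assuming CHIP_PA_WIDTH() == 40 and CHIP_LOG_NUM_MSHIMS() == 2
 * (four memory controllers), NR_PA_HIGHBIT_SHIFT == 38 and the top two
 * bits of a physical address select which controller owns it.
 */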

#ifdef __tilegx__

/*
 * We reserve the lower half of memory for user-space programs, and the
 * upper half for system code.  We re-map all of physical memory in the
 * upper half, which takes a quarter of our VA space.  Then we have
 * the vmalloc regions.  The supervisor code lives at 0xfffffff700000000,
 * with the hypervisor above that.
 *
 * Loadable kernel modules are placed immediately after the static
 * supervisor code, with each being allocated a 256MB region of
 * address space, so we don't have to worry about the range of "jal"
 * and other branch instructions.
 *
 * For now we keep life simple and just allocate one pmd (4GB) for vmalloc.
 * Similarly, for now we don't play any struct page mapping games.
 */

#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH()
# error Too much PA to map with the VA available!
#endif
#define HALF_VA_SPACE    (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))

#define MEM_LOW_END      (HALF_VA_SPACE - 1)          /* low half */
#define MEM_HIGH_START   (-HALF_VA_SPACE)             /* high half */
#define PAGE_OFFSET      MEM_HIGH_START
#define FIXADDR_BASE     _AC(0xfffffff400000000, UL)  /* 4 GB */
#define FIXADDR_TOP      _AC(0xfffffff500000000, UL)  /* 4 GB */
#define _VMALLOC_START   FIXADDR_TOP
#define HUGE_VMAP_BASE   _AC(0xfffffff600000000, UL)  /* 4 GB */
#define MEM_SV_START     _AC(0xfffffff700000000, UL)  /* 256 MB */
#define MEM_SV_INTRPT    MEM_SV_START
#define MEM_MODULE_START _AC(0xfffffff710000000, UL)  /* 256 MB */
#define MEM_MODULE_END   (MEM_MODULE_START + (256*1024*1024))
#define MEM_HV_START     _AC(0xfffffff800000000, UL)  /* 32 GB */

/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR MEM_SV_START
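
/*
 * E.g., assuming CHIP_VA_WIDTH() == 42, HALF_VA_SPACE is 2TB and
 * PAGE_OFFSET (== MEM_HIGH_START) sign-extends to 0xfffffe0000000000,
 * so all of physical memory is linearly re-mapped starting there.
 */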

#else /* !__tilegx__ */

/*
 * A PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 768MB.
 * If you want more physical memory than this then see the CONFIG_HIGHMEM
 * option in the kernel configuration.
 *
 * The top 16MB chunk in the table below is unavailable to Linux.  Since
 * the kernel interrupt vectors must live at either 0xfe000000 or 0xfd000000
 * (depending on whether the kernel is at PL2 or PL1), we map all of the
 * bottom of RAM at this address with a huge page table entry to minimize
 * its ITLB footprint (as well as at PAGE_OFFSET).  The last architected
 * requirement is that user interrupt vectors live at 0xfc000000, so we
 * make that range of memory available to user processes.  The remaining
 * regions are sized as shown; the first four addresses use the PL 1
 * values, and after that, we show "typical" values, since the actual
 * addresses depend on kernel #defines.
 *
 * MEM_HV_INTRPT                   0xfe000000
 * MEM_SV_INTRPT (kernel code)     0xfd000000
 * MEM_USER_INTRPT (user vector)   0xfc000000
 * FIX_KMAP_xxx                    0xf8000000 (via NR_CPUS * KM_TYPE_NR)
 * PKMAP_BASE                      0xf7000000 (via LAST_PKMAP)
 * HUGE_VMAP                       0xf3000000 (via CONFIG_NR_HUGE_VMAPS)
 * VMALLOC_START                   0xf0000000 (via __VMALLOC_RESERVE)
 * mapped LOWMEM                   0xc0000000
 */

#define MEM_USER_INTRPT  _AC(0xfc000000, UL)
#if CONFIG_KERNEL_PL == 1
#define MEM_SV_INTRPT    _AC(0xfd000000, UL)
#define MEM_HV_INTRPT    _AC(0xfe000000, UL)
#else
#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
#define MEM_SV_INTRPT    _AC(0xfe000000, UL)
#define MEM_HV_INTRPT    _AC(0xff000000, UL)
#endif

#define INTRPT_SIZE 0x4000

/* Tolerate page size larger than the architecture interrupt region size. */
#if PAGE_SIZE > INTRPT_SIZE
#undef INTRPT_SIZE
#define INTRPT_SIZE PAGE_SIZE
#endif

#define KERNEL_HIGH_VADDR MEM_USER_INTRPT
#define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE)
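
/*
 * E.g., with 64KB pages, FIXADDR_TOP ends up at 0xfc000000 - 0x10000
 * == 0xfbff0000, just below the user interrupt vector page.
 */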

#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

/* On 32-bit architectures we mix kernel modules in with other vmaps. */
#define MEM_MODULE_START VMALLOC_START
#define MEM_MODULE_END   VMALLOC_END

#endif /* __tilegx__ */

#ifndef __ASSEMBLY__

#ifdef CONFIG_HIGHMEM

/* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */
extern unsigned long pbase_map[];
extern void *vbase_map[];

static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
        unsigned long kaddr = (unsigned long)_kaddr;
        return pbase_map[kaddr >> HPAGE_SHIFT] +
                ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}
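
/*
 * That is, pbase_map[] holds the base PFN of the huge page backing each
 * HPAGE_SIZE-aligned chunk of kernel VA space, and we add the offset
 * within that chunk, counted in small pages.
 */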

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        unsigned long pfn = kaddr_to_pfn(kaddr);
        return ((phys_addr_t)pfn << PAGE_SHIFT) +
                ((unsigned long)kaddr & (PAGE_SIZE - 1));
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE - 1));
}

/* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */
static inline int virt_addr_valid(const volatile void *kaddr)
{
        extern void *high_memory;  /* copied from <linux/mm.h> */
        return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}

#else /* !CONFIG_HIGHMEM */

static inline unsigned long kaddr_to_pfn(const volatile void *kaddr)
{
        return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT;
}

static inline void *pfn_to_kaddr(unsigned long pfn)
{
        return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET);
}

static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
        return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET);
}

static inline void *phys_to_virt(phys_addr_t paddr)
{
        return (void *)((unsigned long)paddr + PAGE_OFFSET);
}
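
/*
 * Without HIGHMEM the kernel mapping is a single linear offset; e.g.,
 * with PAGE_OFFSET == 0xC0000000, virt_to_phys((void *)0xc0100000)
 * == 0x100000, and phys_to_virt() inverts it exactly.
 */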

/* Check that the given address is within some mapped range of PAs. */
#define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr))

#endif /* !CONFIG_HIGHMEM */

/* Not all callers are consistent in how they call these functions. */
#define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr))
#define __va(paddr) phys_to_virt((phys_addr_t)(paddr))

extern int devmem_is_allowed(unsigned long pagenr);

#ifdef CONFIG_FLATMEM
static inline int pfn_valid(unsigned long pfn)
{
        return pfn < max_mapnr;
}
#endif

/* Provide as macros since these require some other headers included. */
#define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn((void *)(kaddr)))
#define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#endif /* !__ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS \
        (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>

#endif /* _ASM_TILE_PAGE_H */