arch/tile/include/asm/pgtable.h (Linux Kernel 3.7.1)

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd_t are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true).  So we use them for the list structure.
 * The x86 code we are modelled on uses the page->private/index fields
 * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
 * our pgds are so much smaller than a page, it seems a waste to
 * spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
	((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
	((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
	((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
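
/*
 * Illustrative sketch, not part of the original header: since each pgd
 * embeds a struct list_head at PGD_LIST_OFFSET, walking pgd_list and
 * applying list_to_pgd() recovers the pgd pages themselves.  The
 * helper name demo_walk_pgds is hypothetical.
 */
static inline void demo_walk_pgds(void)
{
	struct list_head *pos;

	/* a real walker would hold pgd_lock here */
	list_for_each(pos, &pgd_list) {
		pgd_t *pgd = list_to_pgd(pos);	/* base of this pgd */
		(void)pgd;			/* e.g. sync kernel mappings */
	}
}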

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS	0

#define _PAGE_PRESENT		HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE		HV_PTE_PAGE
#define _PAGE_SUPER_PAGE	HV_PTE_SUPER
#define _PAGE_READABLE		HV_PTE_READABLE
#define _PAGE_WRITABLE		HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE	HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED		HV_PTE_ACCESSED
#define _PAGE_DIRTY		HV_PTE_DIRTY
#define _PAGE_GLOBAL		HV_PTE_GLOBAL
#define _PAGE_USER		HV_PTE_USER

/*
 * All the "standard" bits.  Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
	_PAGE_PRESENT | \
	_PAGE_HUGE_PAGE | \
	_PAGE_SUPER_PAGE | \
	_PAGE_READABLE | \
	_PAGE_WRITABLE | \
	_PAGE_EXECUTABLE | \
	_PAGE_ACCESSED | \
	_PAGE_DIRTY | \
	_PAGE_GLOBAL | \
	_PAGE_USER \
)

#define PAGE_NONE \
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
	(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY	/* this is write-only, which we won't support */
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
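
/*
 * Illustrative sketch, not part of the original header: the __P table
 * indexes private-mapping protections by the r/w/x bits, so a private
 * PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY, i.e. readable
 * but not writable, leaving the write fault to drive copy-on-write.
 * The helper name demo_private_rw_prot is hypothetical.
 */
static inline pgprot_t demo_private_rw_prot(void)
{
	return __P011;	/* PAGE_COPY: present, user, accessed, readable */
}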

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
 * and PAGE_HUGE_PAGE, which must be one and zero, respectively.
 * We set the ignored bits to zero.
 */
#define _PAGE_TABLE	_PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pa((x), 0)
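
/*
 * Illustrative sketch, not part of the original header: pgprot_modify()
 * keeps only the bits outside _PAGE_ALL (the cache-control bits) from
 * the old protections and takes all the standard bits from the new
 * ones, which is what an mprotect()-style update wants.  The helper
 * name demo_change_prot is hypothetical.
 */
static inline pgprot_t demo_change_prot(pgprot_t oldprot, pgprot_t newprot)
{
	return pgprot_modify(oldprot, newprot);
}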

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	ptep->val = 0;
#else
	u32 *tmp = (u32 *)ptep;
	tmp[0] = 0;
	barrier();
	tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)
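
/*
 * Illustrative note, not part of the original header: on 32-bit tile
 * the bottom half of the 64-bit PTE carries the Present bit, so writing
 * tmp[0] before tmp[1] guarantees a concurrent walker never observes
 * Present set alongside a half-cleared upper word.  A hypothetical
 * caller looks like:
 */
static inline void demo_clear_pte(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	pte_clear(mm, addr, ptep);	/* mm and addr are ignored on tile */
}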

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present hv_pte_get_present
#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
#define pte_mksuper hv_pte_set_super

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
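
/*
 * Illustrative sketch, not part of the original header: the hv_pte
 * accessors above take and return a pte_t by value, so they compose by
 * nesting.  The helper name demo_mkdirty_writable is hypothetical.
 */
static inline pte_t demo_mkdirty_writable(pte_t pte)
{
	return pte_mkdirty(pte_mkwrite(pte));
}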

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
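
/*
 * Illustrative sketch, not part of the original header: a migration
 * round trip clears Present and sets Migrating, then reverses both, so
 * a present PTE comes back present with its other bits preserved.  The
 * helper name demo_migrate_round_trip is hypothetical.
 */
static inline pte_t demo_migrate_round_trip(pte_t pte)
{
	pte_t m = pte_mkmigrate(pte);	/* pte_migrating(m), !pte_present(m) */
	return pte_donemigrate(m);	/* present again */
}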

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return PFN_DOWN(hv_pte_get_pa(pte));
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}
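
/*
 * Illustrative sketch, not part of the original header: building a
 * kernel PTE for a page frame by combining pfn_pte() with the
 * PAGE_KERNEL protections defined above.  The helper name
 * demo_kernel_pte is hypothetical.
 */
static inline pte_t demo_kernel_pte(unsigned long pfn)
{
	return pfn_pte(pfn, PAGE_KERNEL);	/* pte_pfn() recovers pfn */
}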

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE		HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS	32
#define pte_file(pte)		(hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte)	((pte).val >> 32)
#define pgoff_to_pte(off)	((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
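
/*
 * Illustrative sketch, not part of the original header: a file offset
 * survives the encode/decode round trip, since pgoff_to_pte() packs it
 * into the high 32 bits and pte_to_pgoff() shifts it back down.  The
 * helper name demo_pgoff_round_trip is hypothetical.
 */
static inline unsigned long demo_pgoff_round_trip(unsigned long off)
{
	pte_t pte = pgoff_to_pte(off);	/* pte_file(pte) is true */
	return pte_to_pgoff(pte);	/* == off */
}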

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })
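
/*
 * Illustrative sketch, not part of the original header: the 5-bit swap
 * type and the offset round-trip through this packing, i.e. for
 * type < 32, __swp_type() and __swp_offset() recover the inputs to
 * __swp_entry().  Assumes swp_entry_t (from <linux/mm_types.h>) is
 * visible here; the helper name demo_swp_round_trip is hypothetical.
 */
static inline unsigned long demo_swp_round_trip(unsigned int type,
						unsigned long off)
{
	swp_entry_t entry = __swp_entry(type, off);
	return __swp_offset(entry);	/* == off; __swp_type(entry) == type */
}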

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return pfn_pte(pte_pfn(pte), newprot);
}

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *).
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
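
/*
 * Illustrative note, not part of the original header: pages_to_mb() is
 * a pure shift, e.g. with 64 KB pages (PAGE_SHIFT == 16, the usual
 * tile configuration) pages_to_mb(32) == 32 >> 4 == 2, i.e. 32 pages
 * span 2 MB.  The helper name demo_pages_to_mb is hypothetical.
 */
static inline unsigned long demo_pages_to_mb(void)
{
	return pages_to_mb(32);	/* 2 with 64 KB pages, 0 with 4 KB pages */
}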

/*
 * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
}


#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
}

static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
}

#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)

/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
	return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
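
/*
 * Illustrative sketch, not part of the original header: installing a
 * pte table into a pmd slot by wrapping its PTFN with the _PAGE_TABLE
 * protections via ptfn_pmd() and __set_pmd().  The helper name
 * demo_populate_pmd is hypothetical.
 */
static inline void demo_populate_pmd(pmd_t *pmdp, unsigned long ptfn)
{
	__set_pmd(pmdp, ptfn_pmd(ptfn, __pgprot(_PAGE_TABLE)));
}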

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
	return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))

static inline void pmd_clear(pmd_t *pmdp)
{
	__pte_clear(pmdp_ptep(pmdp));
}

#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd)	pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define __HAVE_ARCH_PMD_WRITE

#define pfn_pmd(pfn, pgprot)	pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pfn_pmd(pmd_pfn(pmd), newprot);
}
503
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
504
#define has_transparent_hugepage() 1
505
#define pmd_trans_huge pmd_huge_page
506
507
static
inline
pmd_t
pmd_mksplitting(
pmd_t
pmd)
508
{
509
return
pte_pmd
(hv_pte_set_client2(
pmd_pte
(pmd)));
510
}
511
512
static
inline
int
pmd_trans_splitting(
pmd_t
pmd)
513
{
514
return
hv_pte_get_client2(
pmd_pte
(pmd));
515
}
516
#endif
/* CONFIG_TRANSPARENT_HUGEPAGE */
517

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
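
/*
 * Illustrative sketch, not part of the original header: the full walk
 * for a kernel address, combining pgd_offset_k() with the pud/pmd
 * levels (folded or defined by the pgtable_32.h/pgtable_64.h includes
 * above) and pte_offset_kernel().  Assumes the pmd entry maps a pte
 * table rather than a huge page; the helper name
 * demo_kernel_pte_lookup is hypothetical.
 */
static inline pte_t *demo_kernel_pte_lookup(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);
	pud_t *pud = pud_offset(pgd, vaddr);
	pmd_t *pmd = pmd_offset(pud, vaddr);

	return pte_offset_kernel(pmd, vaddr);
}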

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, unsigned long pagesize,
			   pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */