Linux Kernel 3.7.1
arch/ia64/include/asm/tlb.h
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, full_mm_flush);	// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	    for each page-table-entry PTE that needs to be removed do {
 *	      tlb_remove_tlb_entry(tlb, pte, address);
 *	      if (pte refers to a normal page) {
 *	        tlb_remove_page(tlb, page);
 *	      }
 *	    }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

#ifdef CONFIG_SMP
# define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
#else
# define tlb_fast_mode(tlb)	(1)
#endif

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* == ~0U => fast mode */
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* Record for TR entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
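
/*
 * Illustrative sketch (not part of the original header): how the RR_TO_*
 * accessors above pull the fields out of a raw region-register value.
 * The helper name is hypothetical and exists only for demonstration.
 */
static inline void rr_decode_example(u64 rr_val)
{
	u64 ve  = RR_TO_VE(rr_val);	/* bit 0: VHPT walker enable */
	u64 ps  = RR_TO_PS(rr_val);	/* bits 2..7: preferred page size (log2 bytes) */
	u64 rid = RR_TO_RID(rr_val);	/* bits 8..31: region ID */

	(void) ve; (void) ps; (void) rid;	/* example only; values unused */
}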

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;
	if (!tlb_fast_mode(tlb)) {
		unsigned long i;
		tlb->nr = 0;
		tlb->start_addr = ~0UL;
		for (i = 0; i < nr; ++i)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

static inline void
tlb_gather_mmu (struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	/*
	 * Use fast mode if only 1 CPU is online.
	 *
	 * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
	 * doesn't work because of speculative accesses and software prefetching: the page
	 * table of "mm" may be (and usually is) the currently active page table and even
	 * though the kernel won't do any user-space accesses during the TLB shoot down, a
	 * compiler might use speculation or lfetch.fault on what happens to be a valid
	 * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
	 * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
	 * problems.  (We could make fast-mode work by switching the current task to a
	 * different "mm" during the shootdown.) --davidm 08/02/2002
	 */
	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
	tlb->fullmm = full_mm_flush;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu */
	}

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);

	return tlb->max - tlb->nr;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */
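
For reference, a minimal caller-side sketch of the shootdown sequence described in the template comment at the top of this header, assuming a kernel of this vintage. The function name example_shootdown and its parameters are hypothetical stand-ins for whatever the real unmap path has at hand; this is illustrative, not a copy of the kernel's unmap code.

static void example_shootdown(struct mm_struct *mm, struct vm_area_struct *vma,
			      pte_t *ptep, struct page *page,
			      unsigned long start, unsigned long end)
{
	struct mmu_gather gather;
	struct mmu_gather *tlb = &gather;

	tlb_gather_mmu(tlb, mm, 0);		/* 0: not a full-mm flush */
	tlb_start_vma(tlb, vma);
	/* for each PTE being torn down ... */
	tlb_remove_tlb_entry(tlb, ptep, start);	/* record the TLB entry to flush */
	tlb_remove_page(tlb, page);		/* batch the page; may trigger a flush */
	tlb_end_vma(tlb, vma);
	tlb_finish_mmu(tlb, start, end);	/* flush TLBs, then free the batched pages */
}

Note how the page is only handed to tlb_remove_page(); it is actually freed in ia64_tlb_flush_mmu(), after the TLB flush, which is exactly the ordering the four-step procedure in the header comment requires.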