Linux Kernel 3.7.1
arch/m68k/include/asm/mmu_context.h
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
#ifdef CONFIG_MMU

#if defined(CONFIG_COLDFIRE)

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mcfmmu.h>
#include <asm/mmu.h>

#define NO_CONTEXT      256
#define LAST_CONTEXT    255
#define FIRST_CONTEXT   1

extern unsigned long context_map[];
extern mm_context_t next_mmu_context;

extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

static inline void get_mmu_context(struct mm_struct *mm)
{
        mm_context_t ctx;

        if (mm->context != NO_CONTEXT)
                return;
        while (atomic_dec_and_test_lt(&nr_free_contexts)) {
                atomic_inc(&nr_free_contexts);
                steal_context();
        }
        ctx = next_mmu_context;
        while (test_and_set_bit(ctx, context_map)) {
                ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
                if (ctx > LAST_CONTEXT)
                        ctx = 0;
        }
        next_mmu_context = (ctx + 1) & LAST_CONTEXT;
        mm->context = ctx;
        context_mm[ctx] = mm;
}
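
get_mmu_context() above hands out ASIDs round-robin from a 256-slot bitmap, resuming the search just past the last allocation rather than reusing freshly freed slots immediately. The following is an editorial sketch of that scheme in user space, not part of the kernel file: names like alloc_ctx/free_ctx are illustrative, and the atomics and steal_context() backstop are omitted, so it assumes a free slot always exists.

/* Editorial sketch: round-robin bitmap ID allocation as in
 * get_mmu_context()/destroy_context(), minus locking and stealing. */
#include <stdio.h>

#define LAST_CTX 255                            /* IDs 0..255, like LAST_CONTEXT */

static unsigned char ctx_map[LAST_CTX + 1];     /* 1 = in use */
static unsigned int next_ctx;                   /* like next_mmu_context */

static unsigned int alloc_ctx(void)
{
        unsigned int ctx = next_ctx;

        while (ctx_map[ctx])                    /* like test_and_set_bit() */
                ctx = (ctx + 1) & LAST_CTX;     /* wrap past 255 back to 0 */
        ctx_map[ctx] = 1;
        next_ctx = (ctx + 1) & LAST_CTX;        /* resume after the last hit */
        return ctx;
}

static void free_ctx(unsigned int ctx)
{
        ctx_map[ctx] = 0;                       /* like clear_bit() in destroy_context() */
}

int main(void)
{
        unsigned int a = alloc_ctx();
        unsigned int b = alloc_ctx();

        free_ctx(a);                            /* slot 0 freed... */
        printf("%u %u %u\n", a, b, alloc_ctx()); /* prints 0 1 2: ...but not reused yet */
        return 0;
}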
/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk, mm)       (((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                clear_bit(mm->context, context_map);
                mm->context = NO_CONTEXT;
                atomic_inc(&nr_free_contexts);
        }
}

static inline void set_context(mm_context_t context, pgd_t *pgd)
{
        __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        get_mmu_context(tsk->mm);
        set_context(tsk->mm->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm,
                               struct mm_struct *mm)
{
        get_mmu_context(mm);
        set_context(mm->context, mm->pgd);
}

#define deactivate_mm(tsk, mm)  do { } while (0)

extern void mmu_context_init(void);
#define prepare_arch_switch(next) load_ksp_mmu(next)
96
static
inline
void
load_ksp_mmu(
struct
task_struct
*
task
)
97
{
98
unsigned
long
flags
;
99
struct
mm_struct
*mm;
100
int
asid
;
101
pgd_t
*
pgd
;
102
pmd_t
*
pmd
;
103
pte_t
*
pte
;
104
unsigned
long
mmuar;
105
106
local_irq_save
(flags);
107
mmuar = task->
thread
.ksp;
108
109
/* Search for a valid TLB entry, if one is found, don't remap */
110
mmu_write(
MMUAR
, mmuar);
111
mmu_write(
MMUOR
,
MMUOR_STLB
|
MMUOR_ADR
);
112
if
(mmu_read(
MMUSR
) &
MMUSR_HIT
)
113
goto
end
;
114
115
if
(mmuar >=
PAGE_OFFSET
) {
116
mm = &
init_mm
;
117
}
else
{
118
pr_info
(
"load_ksp_mmu: non-kernel mm found: 0x%p\n"
, task->
mm
);
119
mm = task->
mm
;
120
}
121
122
if
(!mm)
123
goto
bug;
124
125
pgd =
pgd_offset
(mm, mmuar);
126
if
(
pgd_none
(*pgd))
127
goto
bug;
128
129
pmd =
pmd_offset
(pgd, mmuar);
130
if
(
pmd_none
(*pmd))
131
goto
bug;
132
133
pte = (mmuar >=
PAGE_OFFSET
) ?
pte_offset_kernel
(pmd, mmuar)
134
:
pte_offset_map
(pmd, mmuar);
135
if
(
pte_none
(*pte) || !
pte_present
(*pte))
136
goto
bug;
137
138
set_pte
(pte,
pte_mkyoung
(*pte));
139
asid = mm->
context
& 0xff;
140
if
(!
pte_dirty
(*pte) && mmuar <=
PAGE_OFFSET
)
141
set_pte
(pte,
pte_wrprotect
(*pte));
142
143
mmu_write(
MMUTR
, (mmuar &
PAGE_MASK
) | (asid <<
MMUTR_IDN
) |
144
(((
int
)(pte->
pte
) & (
int
)
CF_PAGE_MMUTR_MASK
)
145
>>
CF_PAGE_MMUTR_SHIFT
) |
MMUTR_V
);
146
147
mmu_write(
MMUDR
, (
pte_val
(*pte) & PAGE_MASK) |
148
((pte->
pte
) &
CF_PAGE_MMUDR_MASK
) |
MMUDR_SZ_8KB
|
MMUDR_X
);
149
150
mmu_write(
MMUOR
,
MMUOR_ACC
|
MMUOR_UAA
);
151
152
goto
end
;
153
154
bug:
155
pr_info
(
"ksp load failed: mm=0x%p ksp=0x08%lx\n"
, mm, mmuar);
156
end
:
157
local_irq_restore
(flags);
158
}
159
160
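
prepare_arch_switch() runs load_ksp_mmu() before the task switch, apparently so the incoming task's kernel stack already has a TLB entry when the switch code starts using it: the routine probes the TLB through MMUAR/MMUOR first and, only on a miss, walks pgd -> pmd -> pte by hand and programs the entry into MMUTR (tag) and MMUDR (data). The following editorial sketch, not part of the kernel file, models that probe-then-fill flow against a tiny direct-mapped software "TLB"; tlb_probe/tlb_fill are illustrative stand-ins for the register protocol.

/* Editorial sketch: probe the TLB, fill only on a miss. */
#include <stdio.h>

#define TLB_SLOTS 8
#define PAGE_SHIFT 13                      /* 8 KiB pages, as MMUDR_SZ_8KB */

static unsigned long tlb_vpn[TLB_SLOTS];   /* tag: virtual page number */
static int tlb_valid[TLB_SLOTS];

static int tlb_probe(unsigned long addr)   /* like the MMUOR_STLB search */
{
        unsigned long vpn = addr >> PAGE_SHIFT;
        unsigned int slot = vpn % TLB_SLOTS;

        return tlb_valid[slot] && tlb_vpn[slot] == vpn;
}

static void tlb_fill(unsigned long addr)   /* like writing MMUTR/MMUDR */
{
        unsigned long vpn = addr >> PAGE_SHIFT;
        unsigned int slot = vpn % TLB_SLOTS;

        tlb_vpn[slot] = vpn;
        tlb_valid[slot] = 1;
}

int main(void)
{
        unsigned long ksp = 0xc0012345UL;   /* pretend kernel stack pointer */

        if (!tlb_probe(ksp))                /* miss: walk tables, then map */
                tlb_fill(ksp);
        printf("hit=%d\n", tlb_probe(ksp)); /* prints hit=1 */
        return 0;
}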
#elif defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
#include <linux/sched.h>

extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);

/* set the context for a new task to unmapped */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context = SUN3_INVALID_CONTEXT;
        return 0;
}

/* find the context given to this process, and if it hasn't already
   got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
        if (mm->context == SUN3_INVALID_CONTEXT)
                mm->context = get_free_context(mm);
}

/* flush context if allocated... */
static inline void destroy_context(struct mm_struct *mm)
{
        if (mm->context != SUN3_INVALID_CONTEXT)
                clear_context(mm->context);
}

static inline void activate_context(struct mm_struct *mm)
{
        get_mmu_context(mm);
        sun3_put_context(mm->context);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        activate_context(tsk->mm);
}

#define deactivate_mm(tsk, mm)  do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        activate_context(next_mm);
}
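
The Sun3 variant leans on the hardware's small set of contexts: a context is allocated lazily the first time an mm is switched to and released again in destroy_context(). An editorial sketch of that lazy allocation, not part of the kernel file, with stub_get_free_context() as an illustrative stand-in for get_free_context():

/* Editorial sketch: lazy context allocation, Sun3 style. */
#include <stdio.h>

#define INVALID_CTX (-1L)                 /* like SUN3_INVALID_CONTEXT */

struct mm { long context; };

static long next_free = 3;                /* pretend pool of hw contexts */

static long stub_get_free_context(void)
{
        return next_free--;               /* real code may evict another mm */
}

static void get_ctx(struct mm *mm)
{
        if (mm->context == INVALID_CTX)   /* lazy: allocate on first use */
                mm->context = stub_get_free_context();
}

int main(void)
{
        struct mm a = { INVALID_CTX }, b = { INVALID_CTX };

        get_ctx(&a);
        get_ctx(&a);                      /* second call is a no-op */
        get_ctx(&b);
        printf("%ld %ld\n", a.context, b.context);        /* prints 3 2 */
        return 0;
}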
#else

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>

static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        mm->context = virt_to_phys(mm->pgd);
        return 0;
}

#define destroy_context(mm)             do { } while(0)

static inline void switch_mm_0230(struct mm_struct *mm)
{
        unsigned long crp[2] = {
                0x80000000 | _PAGE_TABLE, mm->context
        };
        unsigned long tmp;

        asm volatile (".chip 68030");

        /* flush MC68030/MC68020 caches (they are virtually addressed) */
        asm volatile (
                "movec %%cacr,%0;"
                "orw %1,%0; "
                "movec %0,%%cacr"
                : "=d" (tmp) : "di" (FLUSH_I_AND_D));

        /* Switch the root pointer. For a 030-only kernel,
         * avoid flushing the whole ATC, we only need to
         * flush the user entries. The 68851 does this by
         * itself. Avoid a runtime check here.
         */
        asm volatile (
#ifdef CPU_M68030_ONLY
                "pmovefd %0,%%crp; "
                "pflush #0,#4"
#else
                "pmove %0,%%crp"
#endif
                : : "m" (crp[0]));

        asm volatile (".chip 68k");
}

static inline void switch_mm_0460(struct mm_struct *mm)
{
        asm volatile (".chip 68040");

        /* flush address translation cache (user entries) */
        asm volatile ("pflushan");

        /* switch the root pointer */
        asm volatile ("movec %0,%%urp" : : "r" (mm->context));

        if (CPU_IS_060) {
                unsigned long tmp;

                /* clear user entries in the branch cache */
                asm volatile (
                        "movec %%cacr,%0; "
                        "orl %1,%0; "
                        "movec %0,%%cacr"
                        : "=d" (tmp): "di" (0x00200000));
        }

        asm volatile (".chip 68k");
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (prev != next) {
                if (CPU_IS_020_OR_030)
                        switch_mm_0230(next);
                else
                        switch_mm_0460(next);
        }
}

#define deactivate_mm(tsk,mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        next_mm->context = virt_to_phys(next_mm->pgd);

        if (CPU_IS_020_OR_030)
                switch_mm_0230(next_mm);
        else
                switch_mm_0460(next_mm);
}
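
On classic 020/030/040/060 parts there is no ASID: mm->context is simply the physical address of the page directory, loaded into CRP on 020/030 or URP on 040/060, so switch_mm() skips the root-pointer reload and the cache/ATC flushing entirely when prev == next. An editorial user-space model of that dispatch, not part of the kernel file, with printf standing in for the privileged pmove/movec sequences and cpu_is_020_or_030 as an illustrative stand-in for CPU_IS_020_OR_030:

/* Editorial sketch: the classic m68k switch_mm() dispatch. */
#include <stdio.h>

struct mm { unsigned long context; };     /* phys addr of the pgd */

static int cpu_is_020_or_030 = 1;         /* like CPU_IS_020_OR_030 */

static void load_crp_030(struct mm *mm)
{
        printf("pmove crp <- %#lx\n", mm->context);
}

static void load_urp_040(struct mm *mm)
{
        printf("movec urp <- %#lx\n", mm->context);
}

static void model_switch_mm(struct mm *prev, struct mm *next)
{
        if (prev == next)                 /* same mm: keep ATC/caches warm */
                return;
        if (cpu_is_020_or_030)
                load_crp_030(next);
        else
                load_urp_040(next);
}

int main(void)
{
        struct mm a = { 0x10000UL }, b = { 0x20000UL };

        model_switch_mm(&a, &b);          /* reloads the root pointer */
        model_switch_mm(&b, &b);          /* no-op */
        return 0;
}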
#endif

#else /* !CONFIG_MMU */

static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
}

#define destroy_context(mm)     do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
}

#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */
Generated on Thu Jan 10 2013 12:49:59 for Linux Kernel by Doxygen 1.8.2