Linux Kernel 3.7.1
arch/ia64/include/asm/mmu_context.h
#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
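
/*
 * Worked example (illustrative note, not part of the original header):
 * the macro above packs the per-mm context number together with the 3-bit
 * region number (bits 63-61 of the virtual address) into one region id.
 * Assuming a context number of 5 and an address whose top three bits are 3,
 * ia64_rid(5, addr) evaluates to (5 << 3) | 3 == 43, so each of the eight
 * regions of that address space ends up with a distinct region id.
 */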

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused. This is signalled by the
 * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
 * below. Called by activate_mm(). <[email protected]>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}
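
/*
 * Allocation sketch (illustrative note, not part of the original header):
 * context numbers in the half-open range [ia64_ctx.next, ia64_ctx.limit)
 * are known to be free, so the common slow-path case just hands out
 * ia64_ctx.next and marks it in the bitmap.  Assuming, for example, that
 * bits 0, 1 and 4 of the bitmap are set and next has caught up with limit,
 * find_next_zero_bit() moves next to 2 and find_next_bit() moves limit to
 * 4, re-opening the free run [2, 4).  Only if no free bit is found before
 * max_ctx does wrap_mmu_context() run, recycling region ids and signalling
 * the per-CPU ia64_need_tlb_flush flag that delayed_tlb_flush() checks.
 */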

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
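
/*
 * Encoding sketch (illustrative note, not part of the original header):
 * a region register value carries the VHPT enable bit in bit 0, the
 * preferred page size in bits 7-2, and the region id from bit 8 upward.
 * Assuming 16KB pages (PAGE_SHIFT == 14) and context number 5, rid is
 * 5 << 3 == 40, so rr0 == (40 << 8) | (14 << 2) | 1 == 0x2839; rr1..rr4
 * then step the region-id field by one per region (rid_incr == 1 << 8),
 * giving each region the distinct id computed by ia64_rid() above.
 */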

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
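
/*
 * Usage note (illustrative, not part of the original header): switch_mm()
 * and activate_mm() are the two hooks the generic mm code uses to install
 * an address space -- the scheduler on a context switch, and exec when a
 * fresh mm replaces the old one.  On ia64 both collapse into activate_mm(),
 * which points the page-table base kernel register at the new pgd and then
 * reprograms the region registers via activate_context().
 */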

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */