Linux Kernel 3.7.1
arch/powerpc/mm/tlb_hash32.c
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Modifications by Paul Mackerras (PowerMac) ([email protected])
 * and Cort Dougan (PReP) ([email protected])
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
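/*
 * Note: Hash (from mmu_decl.h) is the base of the hardware hash table;
 * it is zero on CPUs such as the 603 that do not use a hash table, in
 * which case only the TLB itself needs to be flushed.
 */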
/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        unsigned long ptephys;

        if (Hash != 0) {
                ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}
EXPORT_SYMBOL(flush_hash_entry);
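/*
 * ptephys fills the same flush_hash_pages() argument that the callers
 * below fill with pmd_val(*pmd): the physical address of the
 * page-table page holding the PTE(s) being flushed.
 */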
/*
 * Called by ptep_set_access_flags, must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
        if (Hash != 0)
                return;
        _tlbie(addr);
}
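/*
 * _tlbie invalidates a single TLB entry (the "tlbie" instruction);
 * _tlbia invalidates the entire TLB.  Both helpers are reached only
 * on hash-less CPUs (Hash == 0) throughout this file.
 */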
/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
        if (Hash == 0) {
                /*
                 * 603 needs to flush the whole TLB here since
                 * it doesn't use a hash table.
                 */
                _tlbia();
        }
}
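/*
 * When a hash table is present there is nothing left to do here: the
 * hash (and TLB) entries were already flushed page by page through
 * flush_hash_entry() as the PTEs were unmapped.
 */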
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 *    -- Cort
 */
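/*
 * All four entry points below funnel into flush_range() (and from
 * there into flush_hash_pages()), except flush_tlb_page(), which
 * calls flush_hash_pages() directly for its single page.
 */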
static void flush_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        pmd_t *pmd;
        unsigned long pmd_end;
        int count;
        unsigned int ctx = mm->context.id;

        if (Hash == 0) {
                _tlbia();
                return;
        }
        start &= PAGE_MASK;
        if (start >= end)
                return;
        end = (end - 1) | ~PAGE_MASK;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
        for (;;) {
                pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
                if (pmd_end > end)
                        pmd_end = end;
                if (!pmd_none(*pmd)) {
                        count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
                        flush_hash_pages(ctx, start, pmd_val(*pmd), count);
                }
                if (pmd_end == end)
                        break;
                start = pmd_end + 1;
                ++pmd;
        }
}
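/*
 * flush_range() rounds the range out to whole pages (the
 * "end = (end - 1) | ~PAGE_MASK" step makes end the last byte of its
 * page) and then walks one PMD entry, i.e. one page-table page, per
 * loop iteration, flushing up to PGDIR_SIZE worth of pages at a time.
 */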
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *mp;

        if (Hash == 0) {
                _tlbia();
                return;
        }

        /*
         * It is safe to go down the mm's list of vmas when called
         * from dup_mmap, holding mmap_sem. It would also be safe from
         * unmap_region or exit_mmap, but not from vmtruncate on SMP -
         * but it seems dup_mmap is the only SMP case which gets here.
         */
        for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);
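/*
 * Flush the TLB (and any hash-table) entry for a single page.  Kernel
 * addresses (vmaddr >= TASK_SIZE) are looked up in init_mm rather than
 * in the vma's address space.
 */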
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct mm_struct *mm;
        pmd_t *pmd;

        if (Hash == 0) {
                _tlbie(vmaddr);
                return;
        }
        mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
        if (!pmd_none(*pmd))
                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);
void __init early_init_mmu(void)
{
}
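For orientation, here is a minimal, hypothetical sketch of how a caller typically pairs a kernel mapping change with one of the flush routines above; example_remap_range and the surrounding flow are invented for illustration and are not part of this file.

/* Hypothetical usage sketch -- not part of tlb_hash32.c. */
#include <linux/mm.h>
#include <asm/tlbflush.h>

static void example_remap_range(unsigned long vstart, unsigned long vend)
{
        /* ... modify the kernel page-table entries for [vstart, vend) ... */

        /*
         * Stale TLB entries (and, on hash-table CPUs, the matching
         * hash PTEs) for the old mapping must be invalidated before
         * the range is used again.
         */
        flush_tlb_kernel_range(vstart, vend);
}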