Linux Kernel 3.7.1
arch/s390/include/asm/tlb.h
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two-step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
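/*
 * Editorial sketch, not part of this header: the two-step update
 * described above, spelled out with hypothetical helper names.
 * On s390, step i must use one of the architecturally sanctioned
 * invalidation instructions (IPTE, IDTE or CSP); a plain store to
 * a live entry would violate the quoted requirement.
 *
 *	invalidate_pte(ptep, address);	(step i: IPTE flushes the entry)
 *	set_new_pte(ptep, new_pte);	(step ii: store the new value)
 */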
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
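/*
 * Editorial note: on a 64-bit build with 4KB pages this works out
 * to (4096 - 24) / 8 = 509 table pointers per batch page, since
 * sizeof(struct mmu_table_batch) is 24 bytes there (16-byte
 * rcu_head, 4-byte nr, 4 bytes of padding before the pointer array).
 */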
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
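/*
 * Editorial note: these two functions batch page-table pages in the
 * mmu_table_batch above and defer their freeing (via the rcu_head)
 * until no CPU can still be walking them, e.g. through a lockless
 * get_user_pages-style traversal. In this kernel their definitions
 * live in arch/s390/mm/pgtable.c.
 */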
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->batch = NULL;
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
}
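/*
 * Editorial sketch of the intended calling sequence, as driven by
 * the generic mm code (simplified; the real call sites are in
 * mm/memory.c):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);		(start a gather cycle)
 *	... clear ptes, call tlb_remove_page() on freed pages ...
 *	tlb_finish_mmu(&tlb, start, end);	(flush pending batches)
 */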
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_table_flush(tlb);
}
/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}
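/*
 * Editorial note: in the generic mmu_gather a zero return from
 * __tlb_remove_page means "batch full, caller must tlb_flush_mmu".
 * s390 frees the page immediately above, so returning 1 reports
 * the batch as never full, matching the comment on the return.
 */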
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}
/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	if (!tlb->fullmm)
		return page_table_free_rcu(tlb, (unsigned long *) pte);
	page_table_free(tlb->mm, (unsigned long *) pte);
}
/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	if (!tlb->fullmm)
		return tlb_remove_table(tlb, pmd);
	crst_table_free(tlb->mm, (unsigned long *) pmd);
#endif
}
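/*
 * Editorial note: the limits match the reach of the table levels:
 * 1UL << 31 = 2GB is what a two level table can map (no separate
 * pmd), and 1UL << 42 = 4TB, checked in pud_free_tlb below, is the
 * reach of a three level table (no separate pud).
 */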
/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef CONFIG_64BIT
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	if (!tlb->fullmm)
		return tlb_remove_table(tlb, pud);
	crst_table_free(tlb->mm, (unsigned long *) pud);
#endif
}
#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)		do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)				do { } while (0)
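/*
 * Editorial note: these hooks are no-ops because s390 flushes TLB
 * entries at pte invalidation time (IPTE/IDTE), so the generic
 * per-vma and per-entry range tracking has nothing left to do.
 */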
#endif /* _S390_TLB_H */