Linux Kernel 3.7.1: arch/arm/include/asm/cacheflush.h
/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementations of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards; falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page at kaddr is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

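/*
 * Worked example of the rounding convention above, assuming 4 KiB
 * pages: a user range [0x8014, 0x9008) must be widened to whole pages
 * before being handed to the cache ops, exactly as
 * flush_cache_user_range() does further down in this header:
 *
 *	start = 0x8014 & PAGE_MASK;	// rounds down to 0x8000
 *	end   = PAGE_ALIGN(0x9008);	// rounds up to 0xa000
 */
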
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};
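
/*
 * Illustrative sketch (the member values are examples, not a definitive
 * table): with MULTI_CACHE, the boot code fills in one of these
 * structures for the detected CPU, e.g. from the v7 routines in
 * arch/arm/mm/cache-v7.S:
 *
 *	static const struct cpu_cache_fns example_cache_fns = {
 *		.flush_icache_all = v7_flush_icache_all,
 *		.flush_kern_all   = v7_flush_kern_cache_all,
 *		...
 *	};
 *
 * so the __cpuc_* macros below become indirect calls through cpu_cache,
 * while a single-cache build links the extern declarations directly.
 */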

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {					\
		memcpy(dst, src, len);		\
	} while (0)
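
/*
 * Usage sketch, condensed from the generic access_process_vm() path in
 * mm/memory.c (e.g. ptrace writing a breakpoint into another task):
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap(page);
 *
 * The write side must also keep the target's I-cache coherent with the
 * new data, which is why copy_to_user_page() is a real function while
 * copy_from_user_page() reduces to a plain memcpy().
 */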

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()			\
	asm("mcr	p15, 0, %0, c7, c5, 0"		\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()			\
	asm("mcr	p15, 0, %0, c7, c1, 0"		\
	    : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
    defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma,
		      unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
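
/*
 * The cpumask test in the vivt_* helpers above is what makes them
 * VIVT-specific: a virtually indexed, virtually tagged cache only
 * holds lines for address spaces that have run on this CPU, so if
 * this CPU is not in mm_cpumask(mm) the local cache holds nothing of
 * that mm and the flush can be skipped.
 */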

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
			     unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
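
/*
 * User-space sketch (emit_code() is a hypothetical JIT helper): freshly
 * written code reaches this macro through the ARM-private sys_cacheflush
 * system call, which compilers expose as __builtin___clear_cache():
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	emit_code(buf, len);
 *	__builtin___clear_cache((char *)buf, (char *)buf + len);
 *	((void (*)(void))buf)();	// now safe to execute
 */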

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e. page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
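
/*
 * Simplified sketch of the deferral described above; the real logic
 * lives in arch/arm/mm/flush.c and also handles broadcast and aliasing
 * cases:
 *
 *	void flush_dcache_page(struct page *page)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (mapping && !page_mapped(page))
 *			clear_bit(PG_dcache_clean, &page->flags);  // defer
 *		else {
 *			__flush_dcache_page(mapping, page);        // flush now
 *			set_bit(PG_dcache_clean, &page->flags);
 *		}
 *	}
 *
 * update_mmu_cache() later tests PG_dcache_clean when the page is
 * mapped into user space and performs the postponed flush.
 */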

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
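
/*
 * Usage sketch (do_io() is a hypothetical I/O step; cf.
 * Documentation/cachetlb.txt): code doing I/O on data reached through
 * a vmap/vmalloc alias brackets the I/O with these helpers:
 *
 *	flush_kernel_vmap_range(vaddr, size);	   // push dirty lines before I/O
 *	do_io(vaddr, size);
 *	invalidate_kernel_vmap_range(vaddr, size); // drop stale lines after a read
 *
 * On non-aliasing caches both helpers are no-ops, hence the
 * cache_is_vivt()/cache_is_vipt_aliasing() guard.
 */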

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				      struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g. via vmap,
 * vmalloc, ioremap etc.) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}
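
/*
 * flush_cache_vmap() above and flush_cache_vunmap() below are called
 * from the generic vmalloc machinery rather than by drivers; roughly:
 *
 *	void *v = vmap(pages, nr, VM_MAP, PAGE_KERNEL);	// flush_cache_vmap()
 *	...use the new alias...
 *	vunmap(v);					// flush_cache_vunmap()
 */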

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif