#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE		32
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	void (*resume)(void);
};
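/*
 * The controller signals completion of maintenance operations through the
 * maintenance registers themselves: a register reads back non-zero while
 * an operation on the corresponding line or way is still in progress.
 */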
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
/* cache operations by line are atomic on PL310, so no wait is needed */
static inline void cache_wait(void __iomem *reg, unsigned long mask) {}
#else
#define cache_wait	cache_wait_way
#endif
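/*
 * A cache sync drains the controller's write and eviction buffers.  On
 * PL310s affected by erratum 753970 the sync is issued through a dummy
 * register instead of CACHE_SYNC (see sync_reg_offset in l2x0_init()).
 */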
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
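/*
 * Errata 588369 and 727915 require some maintenance operations to be
 * performed with the PL310 debug register set; debug_writel() compiles
 * away entirely when neither workaround is configured.
 */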
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
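/*
 * Erratum 588369: on affected r1 PL310 releases, Clean & Invalidate by PA
 * may fail to invalidate the line, so a flush is issued as a Clean by PA
 * followed by a separate Invalidate by PA, with the debug register set.
 */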
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
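/*
 * The range operations below walk the physical range one cache line at a
 * time, in blocks of at most 4K, dropping and re-taking l2x0_lock between
 * blocks so that interrupt latency stays bounded on large ranges.
 */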
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		/* a partial line at the start must be cleaned, not discarded */
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* cleaning the whole cache by way is cheaper for very large ranges */
	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	/* flushing the whole cache by way is cheaper for very large ranges */
	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	/* L310 has eight lockdown register pairs, L210/L220 have one */
	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
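/*
 * Probe the controller type from CACHE_ID, derive the way count and cache
 * size from AUX_CTRL, and, if the cache is not already enabled by the
 * bootloader or secure firmware, configure and enable it.
 */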
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways from the part number */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		ways = (aux & (1 << 16)) ? 16 : 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* erratum 753970: sync via the unmapped dummy register */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/* L2 cache size = way size * number of ways */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;
	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
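	/*
	 * Hook this controller into the generic ARM outer-cache interface;
	 * outer_inv_range(), outer_clean_range(), outer_flush_range() and
	 * friends dispatch through these function pointers.
	 */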
	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
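/*
 * Device tree probing: the optional latency properties are folded into
 * the AUX_CTRL value for L210/L220, while the PL310 variant programs its
 * dedicated latency and address-filter registers directly.
 */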
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}
	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
/*
 * Unlike L210/L220, the PL310 keeps tag and data RAM latencies in
 * dedicated registers, each taking separate read/write/setup cycle
 * counts, so the three-cell properties are written straight to hardware.
 */
static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
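/*
 * Suspend/resume support: the controller loses its configuration when
 * powered off, so the relevant registers are snapshotted into
 * l2x0_saved_regs at init time and reprogrammed before re-enabling.
 */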
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	/* the prefetch register exists from r2p0, power control from r3p0 */
	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}
static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}
static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup saved by pl310_save() */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}
static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save = pl310_save,
	.resume = pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.resume = l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};
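/*
 * For illustration only, a typical device tree node this table would
 * match might look like the following (addresses and latencies are
 * made up for the example):
 *
 *	L2: cache-controller@fffef000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfffef000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 1 2>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */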
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1))
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif
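/*
 * For illustration, a hypothetical platform would typically call this
 * from its ->init_machine() hook before any outer-cache operation is
 * needed, e.g.:
 *
 *	l2x0_of_init(0, ~0UL);	 (take AUX_CTRL as set up by the bootloader)
 */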