#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired DTLB entry reserved for building temporary D-cache aliases */
static unsigned long long dtlb_cache_slot;

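/*
 * The two helpers below map and unmap one page through the reserved DTLB
 * slot above: a physical page is made reachable at a chosen effective
 * address (and therefore a chosen cache colour) for the duration of the
 * cache operations that follow.
 */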
static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr,
                                              unsigned long asid,
                                              unsigned long paddr)
{
        local_irq_disable();
        sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
        sh64_teardown_tlb_slot(dtlb_cache_slot);
        local_irq_enable();
}

static inline void sh64_icache_inv_all(void)
{
        unsigned long long addr, flag, data;
        unsigned long flags;

        /* Invalidate the whole I-cache by setting ICCR0.ICI, inside a
           critical section for safety. */
        addr = ICCR0;
        flag = ICCR0_ICI;
        data = 0;

        local_irq_save(flags);
        __asm__ __volatile__ (
                "getcfg %3, 0, %0\n\t"
                "or     %0, %2, %0\n\t"
                "putcfg %3, 0, %0\n\t"
                "synci"
                : "=&r" (data)
                : "0" (data), "r" (flag), "r" (addr));
        local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
        /* Invalidate the range [start, end] from the I-cache; the addresses
           lie in the kernel superpage. */
        unsigned long long ullend, addr, aligned_start;

        aligned_start = (unsigned long long)(signed long long)(signed long) start;
        ullend = (unsigned long long)(signed long long)(signed long) end;

        /* Round the start down to its containing cache line */
        addr = aligned_start & ~(unsigned long long)(L1_CACHE_BYTES - 1);
        while (addr <= ullend) {
                __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
                addr += L1_CACHE_BYTES;
        }
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
        /* Invalidate the page-aligned range [eaddr, eaddr + PAGE_SIZE) from
           the I-cache.  If the vma's ASID is not the one currently running,
           switch to it (with IRQs off) for the duration of the loop. */
        unsigned int cpu = smp_processor_id();
        unsigned long long addr, end_addr;
        unsigned long flags = 0;
        unsigned long running_asid, vma_asid;

        addr = eaddr;
        end_addr = addr + PAGE_SIZE;

        running_asid = get_asid();
        vma_asid = cpu_asid(cpu, vma->vm_mm);
        if (running_asid != vma_asid) {
                local_irq_save(flags);
                switch_and_save_asid(vma_asid);
        }

        while (addr < end_addr) {
                /* Worth unrolling a little */
                __asm__ __volatile__("icbi %0,  0" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 32" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 64" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
                addr += 128;
        }

        if (running_asid != vma_asid) {
                switch_and_save_asid(running_asid);
                local_irq_restore(flags);
        }
}

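/*
 * The I-cache on SH-5 carries no physical tags, so user-space aliases
 * cannot be resolved by purging through a kernel alias the way the D-cache
 * routines below do it; icbi has to be issued through a user mapping under
 * the right ASID.  Beyond roughly 64 pages it is assumed cheaper to
 * invalidate the whole I-cache instead.
 */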
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
{
        int n_pages = ((end - start) >> PAGE_SHIFT);

        if (!mm)
                return;

        if (n_pages >= 64) {
                sh64_icache_inv_all();
        } else {
                unsigned long aligned_start;
                unsigned long eaddr;
                unsigned long after_last_page_start;
                unsigned long mm_asid, current_asid;
                unsigned long flags = 0;

                mm_asid = cpu_asid(smp_processor_id(), mm);
                current_asid = get_asid();

                if (mm_asid != current_asid) {
                        /* Run the invalidate loop under the mm's ASID, IRQs off */
                        local_irq_save(flags);
                        switch_and_save_asid(mm_asid);
                }

                aligned_start = start & PAGE_MASK;
                after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

                while (aligned_start < after_last_page_start) {
                        struct vm_area_struct *vma;
                        unsigned long vma_end;

                        vma = find_vma(mm, aligned_start);
                        if (!vma || (vma->vm_start > aligned_start)) {
                                /* Hole in the address space: step past it */
                                aligned_start += PAGE_SIZE;
                                continue;
                        }
                        vma_end = vma->vm_end;
                        if (vma->vm_flags & VM_EXEC) {
                                eaddr = aligned_start;
                                while (eaddr < vma_end) {
                                        sh64_icache_inv_user_page(vma, eaddr);
                                        eaddr += PAGE_SIZE;
                                }
                        }
                        aligned_start = vma->vm_end;    /* Skip to the next region */
                }

                if (mm_asid != current_asid) {
                        switch_and_save_asid(current_asid);
                        local_irq_restore(flags);
                }
        }
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
        /* icbi never raises ITLBMISS: if there is no hit on the virtual tag
           the instruction completes without a TLB lookup, so the current
           ASID context is sufficient even if it has since been recycled. */
        unsigned long long aligned_start;
        unsigned long long ull_end;
        unsigned long long addr;

        ull_end = end;
        aligned_start = start & ~(unsigned long long)(L1_CACHE_BYTES - 1);
        addr = aligned_start;
        while (addr < ull_end) {
                __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
                addr += L1_CACHE_BYTES;
        }
}

/* Buffer used as the target of alloco instructions, so that cache lines can
   be purged from the D-cache sets by natural eviction. */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

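/*
 * Purge all ways of a block of cache sets: for each requested set, issue
 * one alloco per way at addresses inside dummy_alloco_area that index that
 * set, displacing (and writing back, in write-back mode) whatever was
 * resident there.
 */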
static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
        int dummy_buffer_base_set;
        unsigned long long eaddr, eaddr0, eaddr1;
        int j;
        int set_offset;

        dummy_buffer_base_set = ((int)&dummy_alloco_area &
                                 cpu_data->dcache.entry_mask) >>
                                 cpu_data->dcache.entry_shift;
        set_offset = sets_to_purge_base - dummy_buffer_base_set;

        for (j = 0; j < n_sets; j++, set_offset++) {
                set_offset &= (cpu_data->dcache.sets - 1);
                eaddr0 = (unsigned long long)dummy_alloco_area +
                         (set_offset << cpu_data->dcache.entry_shift);

                /* Do one alloco per cache way, each hitting the required set.
                   In write-back mode this purges the #ways resident lines.
                   There is little point unrolling: the allocos stall more if
                   they are issued too close together. */
                eaddr1 = eaddr0 + cpu_data->dcache.way_size *
                                  cpu_data->dcache.ways;
                for (eaddr = eaddr0; eaddr < eaddr1;
                     eaddr += cpu_data->dcache.way_size) {
                        __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
                        __asm__ __volatile__ ("synco");
                }

                /* Touch each address as well: alloco is a NOP if the cache is
                   operating write-through, so force a load to displace the
                   line in that case. */
                eaddr1 = eaddr0 + cpu_data->dcache.way_size *
                                  cpu_data->dcache.ways;
                for (eaddr = eaddr0; eaddr < eaddr1;
                     eaddr += cpu_data->dcache.way_size) {
                        *(volatile unsigned char *)(unsigned long)eaddr;
                }
        }
}

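/*
 * Purging the entire D-cache is done by running the set purge above over
 * every set rather than by reading tags and purging matching lines
 * individually; letting the allocos evict the current contents is far
 * cheaper on SH-5.
 */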
static void sh64_dcache_purge_all(void)
{
        sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Effective-address window assumed not to be used for anything else, through
   which physical pages are temporarily aliased below. */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

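/*
 * sh64_dcache_purge_coloured_phy_page() exploits the fact that the D-cache
 * matches on the physical tag when no virtual tag matches: the physical
 * page is mapped through the wired DTLB slot at an alias with the same
 * cache colour as the original effective address, and ocbp through that
 * alias writes back and invalidates the lines of interest without an ASID
 * switch.
 */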
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
                                                unsigned long eaddr)
{
        unsigned long long magic_page_start;
        unsigned long long magic_eaddr, magic_eaddr_end;

        magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

        /* As long as the kernel is not pre-emptible, this doesn't need to be
           under cli/sti. */
        sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

        magic_eaddr = magic_page_start;
        magic_eaddr_end = magic_eaddr + PAGE_SIZE;

        while (magic_eaddr < magic_eaddr_end) {
                /* The ocbps block anyway, so unrolling buys nothing here */
                __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
                magic_eaddr += L1_CACHE_BYTES;
        }

        sh64_teardown_dtlb_cache_slot();
}

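/*
 * When only the physical address of a page is known, every possible cache
 * colour has to be covered: the page is mapped and purged once per synonym
 * combination through the same wired DTLB slot.
 */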
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
        unsigned long long eaddr_start, eaddr, eaddr_end;
        int i;

        eaddr_start = MAGIC_PAGE0_START;
        for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
                sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

                eaddr = eaddr_start;
                eaddr_end = eaddr + PAGE_SIZE;
                while (eaddr < eaddr_end) {
                        __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
                        eaddr += L1_CACHE_BYTES;
                }

                sh64_teardown_dtlb_cache_slot();
                eaddr_start += PAGE_SIZE;
        }
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
                                         unsigned long addr, unsigned long end)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        unsigned long paddr;

        if (!mm)
                return;         /* No way to find the physical address */

        /* Walk down to the PTE page covering 'addr'; the caller guarantees
           the range does not cross a PMD boundary. */
        pgd = pgd_offset(mm, addr);
        if (pgd_bad(*pgd))
                return;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                pte_t entry = *pte;

                if (pte_none(entry) || !pte_present(entry))
                        continue;
                paddr = pte_val(entry) & PAGE_MASK;
                sh64_dcache_purge_coloured_phy_page(paddr, addr);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
}

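/*
 * For a user range there is a trade-off between purging the affected pages
 * selectively (cheap for small ranges, but it costs a page-table walk) and
 * purging the whole D-cache by eviction.  The thresholds below - 64 pages,
 * or a range whose ends are not covered by the same PMD entry - mark the
 * point where the full purge is assumed to win.
 */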
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        int n_pages = ((end - start) >> PAGE_SHIFT);

        if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
                sh64_dcache_purge_all();
        } else {
                /* Small range, covered by a single page-table page */
                start &= PAGE_MASK;     /* should already be so */
                end = PAGE_ALIGN(end);  /* should already be so */
                sh64_dcache_purge_user_pages(mm, start, end);
        }
}

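/*
 * The sh5_flush_*() routines below are the per-CPU cache-flush backends;
 * each takes a single opaque argument (a struct flusher_data, or a raw
 * pointer for the page/sigtramp variants) so that the generic SH cache
 * code can dispatch to them uniformly.
 */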
/* Invalidate the entire contents of both caches, after writing back to
   memory any dirty data from the D-cache. */
static void sh5_flush_cache_all(void *unused)
{
        sh64_dcache_purge_all();
        sh64_icache_inv_all();
}

/* Invalidate an entire user address space from the D-cache, after writing
   back dirty data.  The I-cache is left alone: the mm being torn down will
   not run again under this ASID before a full flush happens. */
static void sh5_flush_cache_mm(void *unused)
{
        sh64_dcache_purge_all();
}

/* Purge (from both caches) the range [start, end) of virtual addresses in
   the user address space of vma->vm_mm, writing back any dirty data. */
static void sh5_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        sh64_dcache_purge_user_range(vma->vm_mm, start, end);
        sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

/* Purge the page at 'pfn' mapped at 'eaddr' from both caches; the I-cache
   only needs attention if the mapping is executable. */
static void sh5_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long eaddr, pfn;

        vma = data->vma;
        eaddr = data->addr1;
        pfn = data->addr2;

        sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

        if (vma->vm_flags & VM_EXEC)
                sh64_icache_inv_user_page(vma, eaddr);
}

static void sh5_flush_dcache_page(void *page)
{
        sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
        wmb();
}

/* Invalidate the range [start, end) of kernel virtual addresses from the
   I-cache. */
static void sh5_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;

        start = data->addr1;
        end = data->addr2;

        sh64_icache_inv_kernel_range(start, end);
}

/* Flush a signal trampoline on the stack: write the D-cache line back so
   the new instructions reach memory, then invalidate the corresponding
   I-cache range for the current process. */
static void sh5_flush_cache_sigtramp(void *vaddr)
{
        unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

        __flush_wback_region(vaddr, L1_CACHE_BYTES);
        wmb();
        sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}

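/*
 * For context, a sketch of how these static routines are normally wired up
 * at boot; the real registration lives outside this excerpt.  The generic
 * SH cache code dispatches through local_flush_* function pointers, and the
 * wired DTLB entry used above has to be reserved once.  The helper
 * sh64_get_wired_dtlb_entry() is assumed from the SH-5 TLB code.
 */
void __init sh5_cache_init(void)
{
        local_flush_cache_all           = sh5_flush_cache_all;
        local_flush_cache_mm            = sh5_flush_cache_mm;
        local_flush_cache_dup_mm        = sh5_flush_cache_mm;
        local_flush_cache_page          = sh5_flush_cache_page;
        local_flush_cache_range         = sh5_flush_cache_range;
        local_flush_dcache_page         = sh5_flush_dcache_page;
        local_flush_icache_range        = sh5_flush_icache_range;
        local_flush_cache_sigtramp      = sh5_flush_cache_sigtramp;

        /* Reserve one wired DTLB slot for the cache-colouring aliases */
        dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}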