#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/module.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#if CHIP_HAS_COHERENT_LOCAL_CACHE()
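/*
 * The "noallocl2" boot argument suppresses allocating remotely-homed
 * cache lines in the local L2; it is parsed at early boot below.
 */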
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif
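/*
 * Compute the set of cpus this flush will interrupt: the cpus
 * flushing their caches, the cpus flushing TLB entries, and the cpu
 * implied by each remote ASID's (x, y) coordinates on the grid.
 */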
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
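/*
 * Wrapper around hv_flush_remote() that canonicalizes its arguments
 * (a zero cache-control word or TLB length makes the corresponding
 * cpumask NULL) and panics on any nonzero return code, since there
 * is no safe way to continue after a failed remote flush.
 */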
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}
	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
	       tlb_cpumask, tlb_buf,
	       asids, asidcount, rc);
	panic("Unsafe to continue.");
}
static void homecache_finv_page_va(void *va, int home)
{
	if (home == smp_processor_id()) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
}
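/*
 * Flush a page that may have no usable kernel mapping (e.g. a
 * high-memory page) by installing a temporary mapping first: a
 * kmap_atomic slot under CONFIG_HIGHMEM, a fixmap slot otherwise.
 */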
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long va;

#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	/* ... install a temporary kernel PTE for the page at va ... */
	homecache_finv_page_va((void *)va, home);
	/* ... then clear the temporary PTE and flush it from the TLB ... */
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
}
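/* Flush a page via its existing kernel mapping when that is possible. */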
static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}
static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}
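/*
 * Flush a page out of whatever cache(s) it is in.  For a page whose
 * home is incoherent, a line may be cached on any cpu, so every
 * cacheable cpu has to be visited.
 */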
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);

	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}
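/* Report the home corresponding to a given PTE. */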
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Only immutable pages may keep the non-coherent (NC) bit. */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {
	case PAGE_HOME_IMMUTABLE:
		/* Immutable pages get the NC bit and follow "hash_default". */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "no L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3 &&
			    pte_get_anyhome(pte))
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif
	}
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no L3" to "uncached". */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3)
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
#endif
	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif
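/* Report the page home, falling back to the initial home for highmem. */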
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);
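/*
 * Re-home a run of low-memory pages the caller exclusively owns; the
 * pages must be unmapped and unshared, since the old cache lines are
 * evicted and every kernel PTE for the range is rewritten.
 */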
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);
	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
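/*
 * Allocation wrappers that hand back pages already re-homed as
 * requested; callers must use a lowmem gfp_mask, since the pages
 * need kernel PTEs for homecache_change_page_home() to rewrite.
 */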
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;

	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;

	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
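/*
 * Free pages by first restoring the default home, so the page
 * allocator never hands out a page with a stale non-default home.
 */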
void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, initial_page_home());
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);