#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <asm/code-patching.h>
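/* The real-mode SLB miss allocator is implemented in assembly (slb_low.S). */
extern void slb_allocate_realmode(unsigned long ea);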
static void slb_allocate(unsigned long ea)
{
	/* Currently, non-bolted (user) SLB misses are handled in real mode */
	slb_allocate_realmode(ea);
}
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
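/*
 * The ESID word (the "RB" operand of slbmte) carries the masked
 * effective address, the valid bit, and the SLB slot index in its
 * low bits.
 */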
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 unsigned long slot)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}
#define slb_vsid_shift(ssize)	\
	((ssize) == MMU_SEGSIZE_256M ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)
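/*
 * The VSID word (the "RS" operand of slbmte) carries the virtual
 * segment id shifted into place, the protection/page-size flags,
 * and the segment-size (B) field.
 */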
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
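/*
 * The shadow buffer mirrors the bolted SLB entries for the hypervisor
 * on pSeries, which may use it to restore them (e.g. across partition
 * migration), so it must be kept consistent with the real SLB.
 */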
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     unsigned long entry)
{
	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	get_slb_shadow()->save_area[entry].esid = 0;
	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}
static inline void slb_shadow_clear(unsigned long entry)
{
	get_slb_shadow()->save_area[entry].esid = 0;
}
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					unsigned long entry)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, entry);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, entry))
		     : "memory");
}
static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		/* The stack lives in the bolted linear mapping: don't bolt it */
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(2);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize,
				  lflags, 2);
		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
	}

	/* All in asm so we never touch the stack between slbia and rebolt */
	asm volatile("isync\n"
		     "slbia\n"
		     "slbmte	%0,%1\n"	/* slot 1: first VMALLOC segment */
		     "slbmte	%2,%3\n"	/* slot 2: kernel stack */
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
			"r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
			"r"(ksp_vsid_data),
			"r"(ksp_esid_data)
		     : "memory");
}
void slb_flush_and_rebolt(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}
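/*
 * Rewrite the bolted VMALLOC entry (slot 1) and its shadow when the
 * flags for the vmalloc region change, e.g. when its page size is
 * demoted.
 */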
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
	slb_flush_and_rebolt();
}
/*
 * Helper to compare ESIDs. Four cases: the system is not 1T capable;
 * both addresses are below 1T; exactly one is above 1T (never a match);
 * both are above 1T.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
			 ((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss,
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C is set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}
static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}
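/*
 * Illustrative example (encodings assumed, not taken from slb_low.S):
 * if *insn_addr holds "li r10,0" (0x39400000), patching with
 * immed == 0x190 yields 0x39400190, i.e. "li r10,0x190", so the SLB
 * miss fast path picks up the new constant with no extra load.
 */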
void slb_set_size(u16 size)
{
	extern unsigned int *slb_compare_rr_to_size;

	if (mmu_slb_size == size)
		return;
	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}
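/*
 * Boot-time setup: patch the page-size encodings into the SLB miss
 * handlers once, then invalidate the SLB and create this cpu's bolted
 * entries.
 */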
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
	extern unsigned int *slb_miss_kernel_load_linear;
	extern unsigned int *slb_miss_kernel_load_io;
	extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	extern unsigned int *slb_miss_kernel_load_vmemmap;
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(2);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, 2);

	asm volatile("isync":::"memory");
}