17 #include <asm/pgtable.h>
19 #include <asm/mmu_context.h>
21 #include <asm/cputable.h>
29 #define NR_STAB_CACHE_ENTRIES 8
/*
 * make_ste - install a segment table entry (STE) mapping esid -> vsid
 * in the segment table whose base address is 'stab'.
 *
 * Returns the index of the entry used, encoded as the group's base
 * index OR'd with the entry's position within the group.
 *
 * NOTE(review): this extract is missing interior lines (the embedded
 * original line numbers jump), so several declarations (e.g. 'ste',
 * 'castout_ste', 'old_esid' assignment) and the bodies of the search
 * loops are not visible here.  Comments below describe only what the
 * visible lines establish.
 */
36 static int make_ste(
unsigned long stab,
unsigned long esid,
unsigned long vsid)
38 unsigned long esid_data, vsid_data;
39 unsigned long entry,
group, old_esid, castout_entry,
i;
40 unsigned int global_entry;
/* An effective address at or above PAGE_OFFSET is a kernel segment. */
42 unsigned long kernel_segment = (esid << SID_SHIFT) >=
PAGE_OFFSET;
/*
 * Primary hash: the low 5 bits of the ESID select one of 32 groups;
 * each group holds 8 entries, so the group index is (esid & 0x1f) << 3
 * and the group's byte offset within the table is (esid & 0x1f) << 7
 * (presumably 8 entries x 16 bytes per STE -- TODO confirm, the
 * stab_entry layout is not visible in this extract).
 */
50 global_entry = (esid & 0x1f) << 3;
51 ste = (
struct stab_entry *)(stab | ((esid & 0x1f) << 7));
/*
 * Scan for a free entry: two groups (primary, then secondary hash),
 * 8 entries each.  The body that tests entry validity is not visible
 * in this extract.
 */
54 for (group = 0; group < 2; group++) {
55 for (entry = 0; entry < 8; entry++, ste++) {
/* Free slot found: return its global index. */
60 return (global_entry | entry);
/* Secondary hash: same scheme with the ESID bits complemented. */
64 global_entry = ((~esid) & 0x1f) << 3;
65 ste = (
struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
/*
 * No free entry: cast out an existing one.  The victim is chosen
 * round-robin across the 16 candidates (8 primary + 8 secondary),
 * starting from the per-CPU pointer saved in the PACA.
 */
72 castout_entry = get_paca()->stab_rr;
73 for (i = 0; i < 16; i++) {
/* Candidates 0-7 live in the primary group... */
74 if (castout_entry < 8) {
75 global_entry = (esid & 0x1f) << 3;
76 ste = (
struct stab_entry *)(stab | ((esid & 0x1f) << 7));
77 castout_ste = ste + castout_entry;
/* ...candidates 8-15 live in the secondary group. */
79 global_entry = ((~esid) & 0x1f) << 3;
80 ste = (
struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
81 castout_ste = ste + (castout_entry - 8);
/* Advance round-robin cursor, wrapping modulo 16. */
88 castout_entry = (castout_entry + 1) & 0xf;
/* Remember where the next castout search should start. */
91 get_paca()->stab_rr = (castout_entry + 1) & 0xf;
/* Force completion of preceding context changes before the update. */
96 asm volatile(
"isync" : : :
"memory");
/* Order the STE update before invalidating the stale translation. */
101 asm volatile(
"sync" : : :
"memory");
/*
 * Invalidate any SLB entry cached for the segment that was cast out
 * (old_esid is presumably captured from the victim STE in lines not
 * visible here -- TODO confirm).
 */
107 asm volatile(
"slbie %0" : :
"r" (old_esid <<
SID_SHIFT));
/* Ensure the slbie has taken effect before continuing. */
109 asm volatile(
"sync" : : :
"memory");
/* Return the global index of the entry we reused. */
111 return (global_entry | (castout_entry & 0x7));
/*
 * __ste_allocate - create a segment table entry covering effective
 * address 'ea' for address space 'mm' on the current CPU.
 *
 * Returns nonzero when the request is rejected (user address beyond
 * TASK_SIZE_USER64, or no mm); presumably returns 0 on success --
 * TODO confirm, the success return path is not visible in this extract.
 *
 * NOTE(review): the lines computing 'vsid' and recording 'stab_entry'
 * in the per-CPU stab cache are missing from this extract.
 */
117 static int __ste_allocate(
unsigned long ea,
struct mm_struct *mm)
/* Reject user addresses past the 64-bit task limit, or a missing mm. */
127 if ((ea >= TASK_SIZE_USER64) || (! mm))
/* Install the STE in this CPU's segment table (base from the PACA). */
133 stab_entry = make_ste(get_paca()->stab_addr,
GET_ESID(ea), vsid);
/*
 * Order the preceding update (presumably the stab-cache store done in
 * lines not visible here) before returning.
 */
144 asm volatile(
"sync":::
"memory");
/*
 * Public entry point: allocate an STE for 'ea' in the current task's
 * address space.  NOTE(review): the enclosing function header is not
 * visible in this extract; this is presumably the body of
 * ste_allocate() -- confirm against the full file.
 */
152 return __ste_allocate(ea,
current->mm);
/*
 * Context-switch handling for the segment table: flush the user STEs
 * installed for the previous task, then preload entries for the new
 * task's hot addresses (pc, stack, mmap base).
 *
 * NOTE(review): the enclosing function header and several interior
 * lines (the stab-cache walk body, the pc/stack reads) are not visible
 * in this extract; this is presumably switch_stab() -- confirm against
 * the full file.
 */
162 struct stab_entry *stab = (
struct stab_entry *) get_paca()->stab_addr;
163 struct stab_entry *ste;
167 unsigned long unmapped_base;
/* Force completion of in-flight accesses before touching the table. */
170 asm volatile(
"isync" : : :
"memory");
/*
 * Walk the per-CPU cache of recently installed entries and invalidate
 * just those (loop body not visible in this extract).
 */
184 for (i = 0; i <
offset; i++) {
/*
 * Fallback path: the cache overflowed, so invalidate everything via
 * slbia, bracketed by syncs to order the table updates around it.
 */
207 asm volatile(
"sync; slbia; sync":::
"memory");
/* Pick the mmap base matching the new task's 32/64-bit personality. */
212 if (test_tsk_thread_flag(tsk, TIF_32BIT))
213 unmapped_base = TASK_UNMAPPED_BASE_USER32;
215 unmapped_base = TASK_UNMAPPED_BASE_USER64;
/* Preload STEs the new task will need immediately: its pc... */
217 __ste_allocate(pc, mm);
/* ...its stack... */
222 __ste_allocate(stack, mm);
/* ...and the base of its mmap region. */
228 __ste_allocate(unmapped_base, mm);
/* Order the preloads before returning to the new context. */
231 asm volatile(
"sync" : : :
"memory");
/*
 * Per-CPU segment table allocation.  NOTE(review): the enclosing
 * function header and loop structure are not visible in this extract;
 * this is presumably stabs_alloc() iterating over CPUs -- confirm
 * against the full file.
 */
/* CPUs with an SLB do not use software segment tables at all --
 * presumably this returns early; the branch body is not visible. */
243 if (mmu_has_feature(MMU_FTR_SLB))
247 unsigned long newstab;
/* Convert the freshly allocated physical address (allocation not
 * visible in this extract) to a kernel virtual address. */
254 newstab = (
unsigned long)
__va(newstab);
/* Tail of a printk reporting each CPU's table location (the format
 * string's opening portion is not visible in this extract). */
261 "virtual, 0x%llx absolute\n",
262 cpu,
paca[cpu].stab_addr,
paca[cpu].stab_real);
/*
 * Boot-time segment table initialization for this CPU: flush the SLB,
 * then point the Address Space Register (ASR) at the table.
 *
 * NOTE(review): the enclosing function header (presumably
 * stab_initialize()) and the make_ste() call that seeds the kernel
 * segment are not visible in this extract.
 */
274 unsigned long stabreal;
/* Invalidate all SLB entries before (re)activating the table. */
276 asm volatile(
"isync; slbia; isync":::
"memory");
/* Order the table setup before making it live via the ASR. */
280 asm volatile(
"sync":::
"memory");
/* Real address of the table with the low bit set -- presumably the
 * ASR valid bit per the PowerPC architecture; TODO confirm. */
283 stabreal = get_paca()->stab_real | 0x1ul;
/* Activate: the ASR tells the MMU where this CPU's table lives. */
285 mtspr(SPRN_ASR, stabreal);