#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cputable.h>
#include <asm/kexec.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#define HPTE_LOCK_BIT 3
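
/*
 * Bit 3 of an HPTE's first doubleword serves as a per-entry software
 * lock; see native_lock_hpte() and native_unlock_hpte() below.
 */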
static inline void __tlbie(unsigned long vpn, int psize, int ssize)
{
	unsigned long va = vpn << VPN_SHIFT;

	/* Clear the top 16 bits of the 64-bit VA (non-SLS segment). */
	va &= ~(0xffffULL << 48);
static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
{
	unsigned long va = vpn << VPN_SHIFT;

	/* Clear the top 16 bits of the 64-bit VA (non-SLS segment). */
	va &= ~(0xffffULL << 48);
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);

	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
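
/*
 * Acquire the per-HPTE software lock by atomically setting
 * HPTE_LOCK_BIT in the valid doubleword, spinning while another CPU
 * holds it.
 */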
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int ssize)
{
	unsigned long hpte_v, hpte_r;

	DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
		" rflags=%lx, vflags=%lx, psize=%d)\n",
		hpte_group, vpn, pa, rflags, vflags, psize);
			native_lock_hpte(hptep);
			if (!(hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);

	if (i == HPTES_PER_GROUP)
		return -1;
	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}
	__asm__ __volatile__ ("ptesync" : : : "memory");
static long native_hpte_remove(unsigned long hpte_group)
{
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);
	slot_offset = mftb() & 0x7;
		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}
	if (i == HPTES_PER_GROUP)
		return -1;
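
/*
 * Update the protection bits of an existing HPTE. Returns 0 if a
 * matching entry was found and updated, -1 otherwise; the stale TLB
 * entry is flushed either way.
 */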
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int psize, int ssize,
				 int local)
{
	unsigned long hpte_v, want_v;
	want_v = hpte_encode_v(vpn, psize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v, slot, newpp);

	native_lock_hpte(hptep);
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	}
	native_unlock_hpte(hptep);
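
	/* Ensure it is out of the TLB too. */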
	tlbie(vpn, psize, ssize, local);
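
/*
 * Find the slot of an existing bolted mapping. Bolted mappings only
 * ever live in the primary hash group, so only that group is searched;
 * returns the slot, or -1 if no matching valid HPTE is found.
 */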
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long want_v, hpte_v;

	want_v = hpte_encode_v(vpn, psize, ssize);
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	vsid = get_kernel_vsid(ea, ssize);
	vpn  = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	tlbie(vpn, psize, ssize, 0);
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int psize, int ssize, int local)
{
	unsigned long hpte_v;
	unsigned long want_v;
	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_v(vpn, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidating the HPTE also unlocks it */
		hptep->v = 0;
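
	/* Invalidate the TLB */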
	tlbie(vpn, psize, ssize, local);
}
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
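
/*
 * Decode the page size, segment size and VPN back out of a raw HPTE.
 * Used by native_hpte_clear(), which has no VA to start from, only the
 * hardware entry itself.
 */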
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long vsid, seg_off;
	int i, size, shift, penc;
	for (i = 0; i < LP_BITS; i++) {
	for (size = 0; size < MMU_PAGE_COUNT; size++) {
		/* 4K pages are not represented by LP */
		if (size == MMU_PAGE_4K)
			continue;
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		seg_off |= vpi << shift;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		seg_off |= vpi << shift;
		break;
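
/*
 * Clear every entry in the hash table. Used at kexec time, when all
 * other CPUs are stopped and we are the only user of the table.
 */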
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long hpte_v;
	unsigned long pteg_count;
	for (slot = 0; slot < slots; slot++, hptep++) {
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, ssize);
		}
	}
	asm volatile("eieio; tlbsync; ptesync":::"memory");
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long hpte_v;
	unsigned long want_v;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	for (i = 0; i < number; i++) {
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		want_v = hpte_encode_v(vpn, psize, ssize);
		native_lock_hpte(hptep);
		if (!HPTE_V_COMPARE(hpte_v, want_v) ||
		    !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidating the HPTE also unlocks it */
			hptep->v = 0;
	}
	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			__tlbiel(vpn, psize, ssize);
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			__tlbie(vpn, psize, ssize);
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}
}
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	   = native_hpte_invalidate;
	ppc_md.hpte_updatepp	   = native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	   = native_hpte_insert;
	ppc_md.hpte_remove	   = native_hpte_remove;
	ppc_md.hpte_clear_all	   = native_hpte_clear;
	ppc_md.flush_hash_range	   = native_flush_hash_range;
}