#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
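/*
 * These fragments appear to come from the ia64 TLB support code
 * (arch/ia64/mm/tlb.c): mmu-context wrap-around handling, the global
 * TLB purge (ptc.ga) path, and translation register (TR) insertion
 * and purging.
 */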
unsigned long flush_bit;
ia64_ctx.bitmap[i] ^= flush_bit;
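/*
 * Make each CPU flush its local TLB once before it next uses a
 * context, so stale translations from the recycled context range
 * cannot survive the wrap.
 */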
per_cpu(ia64_need_tlb_flush, i) = 1;
asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
static inline void up_spin(struct spinaphore *ss)
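/*
 * nptcg is how many global purges (ptc.g/ptc.ga) the platform allows
 * in flight at once; need_ptcg_sem says whether purges must be
 * throttled through the spinaphore at all, and toolatetochangeptcgsem
 * latches once the first purge has been issued.
 */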
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;
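/*
 * kp_override/palo_override record whether the purge count was forced
 * by the "nptcg=" boot parameter or by the PALO table rather than
 * taken from PAL_VM_SUMMARY.
 */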
static int kp_override;
static int palo_override;
static int firstcpu = 1;
if (toolatetochangeptcgsem) {
BUG_ON(max_purges < nptcg);
panic("Whoa! Platform does not support global TLB purges.\n");
if (max_purges == 0) max_purges = 1;
if (max_purges < nptcg)
spinaphore_init(&ptcg_sem, max_purges);
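/*
 * Usage sketch (not a verbatim quote of the elided code): the global
 * purge path brackets its ptc.ga loop with the spinaphore so at most
 * max_purges purges are outstanding at once:
 *
 *	if (need_ptcg_sem)
 *		down_spin(&ptcg_sem);
 *	... issue ptc.ga over the range ...
 *	if (need_ptcg_sem)
 *		up_spin(&ptcg_sem);
 */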
unsigned long end, unsigned long nbits)
toolatetochangeptcgsem = 1;
if (mm != active_mm) {
if (mm && active_mm) {
down_spin(&ptcg_sem);
start += (1UL << nbits);
} while (start < end);
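/*
 * The elided loop body issues one broadcast purge per step, something
 * like ia64_ptcga(start, nbits << 2) followed by ia64_srlz_i(), while
 * start walks the range in (1UL << nbits)-sized granules.
 */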
if (mm != active_mm) {
for (i = 0; i < count0; ++i) {
if (mm != current->active_mm) {
nbits = ia64_fls(size + 0xfff);
while (unlikely(((1UL << nbits) & purge.mask) == 0) &&
		(nbits < purge.max_bits))
	++nbits;
if (nbits > purge.max_bits)
	nbits = purge.max_bits;
start &= ~((1UL << nbits) - 1);
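/*
 * The above rounds nbits up to a purge page size the hardware
 * supports (per purge.mask), clamps it to purge.max_bits, and aligns
 * start down to that granule before the purge loop.
 */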
if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
start += (1UL << nbits);
} while (start < end);
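/*
 * A mapping used only by the current CPU can be purged locally with
 * ptc.l; otherwise the branch above falls back to the platform's
 * global TLB purge.
 */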
if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
"defaulting to architected purge page-sizes.\n", status);
purge.mask = 0x115557000UL;
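/*
 * 0x115557000UL sets one bit per architected purge page size
 * (4K, 8K, 16K, 64K, 256K, 1M, 4M, 16M, 64M, 256M and 4G), used when
 * PAL_VM_PAGE_SIZE cannot report what the hardware really supports.
 */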
purge.max_bits = ia64_fls(purge.mask);
ia64_get_ptce(&ptce_info);
status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);
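/*
 * PAL_VM_SUMMARY reports, among other things, how many translation
 * registers the CPU implements; the code below sizes the per-CPU TR
 * bookkeeping from it.
 */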
if (per_cpu(ia64_tr_num, cpu) >
static int justonce = 1;
"IA64_TR_ALLOC_MAX!\n");
tr_log_size = (p->itir & 0xff) >> 2;
tr_end = p->ifa + (1 << tr_log_size) - 1;
if (va > tr_end || p->ifa > va_end)
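/*
 * If the existing TR entry [p->ifa, tr_end] and the new range
 * [va, va_end] are disjoint, there is no overlap to report.
 */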
if (!ia64_idtrs[cpu]) {
if (!ia64_idtrs[cpu])
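/*
 * The per-CPU shadow array of TR entries is allocated lazily on first
 * use; the second check presumably bails out if that allocation
 * failed.
 */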
if (target_mask & 0x1) {
if (is_tr_overlap(p, va, log_size)) {
"Inserted for TR Register!!\n");
if (target_mask & 0x2) {
if (is_tr_overlap(p, va, log_size)) {
"Inserted for TR Register!!\n");
switch (target_mask & 0x3) {
if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
if (i >= per_cpu(ia64_tr_num, cpu))
if (i > per_cpu(ia64_tr_used, cpu))
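/*
 * Slot search: a slot is free when its shadow pte lacks the present
 * bit (0x1). Running past per_cpu(ia64_tr_num, cpu) means no TR is
 * available, and ia64_tr_used tracks the highest slot handed out.
 */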
psr = ia64_clear_ic();
if (target_mask & 0x1) {
ia64_itr(0x1, i, va, pte, log_size);
p = ia64_idtrs[cpu] + i;
p->itir = log_size << 2;
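/*
 * With PSR.ic cleared around the itr insert, the new translation is
 * also mirrored into the shadow entry (ifa, pte, itir) so later
 * overlap checks can see it.
 */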
if (target_mask & 0x2) {
ia64_itr(0x2, i, va, pte, log_size);
p->itir = log_size << 2;
if (target_mask & 0x1) {
if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
ia64_ptr(0x1, p->ifa, p->itir >> 2);
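/*
 * Purge path: a slot whose shadow pte is still valid is purged from
 * the instruction TRs with ptr and its shadow entry cleared.
 */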
if (target_mask & 0x2) {
if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
ia64_ptr(0x2, p->ifa, p->itir >> 2);
if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
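/*
 * Final scan: walk down from the old high-water mark and lower
 * per_cpu(ia64_tr_used, cpu) past slots whose instruction and data
 * shadow entries are both free.
 */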