#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif
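/*
 * Note: pr_hard()/pr_hardcont() sit on the context-switch fast path, so
 * they compile away to nothing unless the build-time debug switch above
 * (assumed here to be DEBUG_HARDER) is defined.
 */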
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
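/*
 * Overview: every live mm is assigned a hardware context (PID) in
 * [first_context..last_context].  When the pool runs dry, an inactive
 * context is stolen from another mm; CPUs that may still cache TLB
 * entries for it flush lazily via the per-CPU stale_map bitmaps below.
 */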
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;	/* one bit per context: allocated? */
static unsigned long *stale_map[NR_CPUS]; /* per-CPU: ids needing a TLB flush */
static struct mm_struct **context_mm;	/* context id -> owning mm */

#define CTX_MAP_SIZE \
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
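/*
 * Worked example: with last_context = 255 and 64-bit longs, CTX_MAP_SIZE
 * is 8 * (255 / 64 + 1) = 32 bytes, i.e. one bit for each of the 256
 * possible contexts, rounded up to whole longs.
 */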
static unsigned int steal_context_smp(unsigned int id)
{
	/* ... */
	max = last_context - first_context;
	/* ... */
	/* Skip victims that are still active, wrapping around the id space */
	if (id > last_context)
		id = first_context;
	/* ... */
	/* Mark the stolen id stale on every hardware thread of that core */
	for (i = cpu_first_thread_sibling(cpu);
	     i <= cpu_last_thread_sibling(cpu); i++)
		__set_bit(id, stale_map[i]);
	/* ... */
}
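/*
 * On SMP a context whose mm is active on another CPU cannot be stolen,
 * so the caller must be prepared to retry until an inactive victim turns
 * up.  The UP variant below has no such restriction: nothing else is
 * running, so any context can be taken after a local TLB flush.
 */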
static unsigned int steal_context_up(unsigned int id)
{
	/* ... */
}
#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);

		if (!used)
			nrf++;
		/* The allocation bitmap and the owner table must agree */
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
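/*
 * The empty stub lets callers invoke context_check_map() unconditionally;
 * the compiler simply discards the call when consistency checking is off.
 */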
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();

	/* ... */
	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

	/* If the mm already has a valid context, sanity-check and reuse it */
	id = next->context.id;
#ifdef DEBUG_MAP_CONSISTENCY
	if (context_mm[id] != next)
		pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
		       next, id, id, context_mm[id]);
#endif
	/* ... */

	/* Otherwise try the next candidate id, wrapping around the range */
	id = next_context;
	if (id > last_context)
		id = first_context;

	/* No free contexts: steal one (SMP path if other CPUs are online) */
	/* ... */
	id = steal_context_smp(id);
	/* ... */
	id = steal_context_up(id);
	/* ... */

	/* Or scan the bitmap for a free id, again wrapping at the end */
	if (id > last_context)
		id = first_context;
	/* ... */
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	/* ... */

	/* The id was marked stale on this CPU: flush it before use */
	pr_hardcont(" | stale flush %d [%d..%d]",
		    id, cpu_first_thread_sibling(cpu),
		    cpu_last_thread_sibling(cpu));
	/* ... */
	for (i = cpu_first_thread_sibling(cpu);
	     i <= cpu_last_thread_sibling(cpu); i++)
		__clear_bit(id, stale_map[i]);
	/* ... */

	/* Flick the MMU over to the new context and its page tables */
	set_context(id, next->pgd);
}
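/*
 * Design note: stealing never sends a cross-CPU invalidation.  A stolen
 * id is only flagged in stale_map[]; each CPU flushes its own TLB the
 * next time it activates that id, keeping IPIs off the switch path.
 */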
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);
	mm->context.id = MMU_NO_CONTEXT;	/* assigned on first switch */
#ifdef CONFIG_PPC_MM_SLICES
	/* ... */
#endif
}
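/*
 * With CONFIG_PPC_MM_SLICES, init_new_context() also sets up the mm's
 * default slice page sizes (elided above).
 */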
#ifdef DEBUG_MAP_CONSISTENCY
	mm->context.active = 0;	/* destroy_context(): debug-only reset */
#endif
static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;
		/* Also drop the dying CPU from all mm cpumasks */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call = mmu_context_cpu_notify,
};
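/*
 * The notifier keeps stale_map[] in step with CPU hotplug: a map is
 * allocated before a secondary CPU comes up and freed once it is dead.
 * The boot CPU's map comes from bootmem in mmu_context_init() instead.
 */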
	/* Pick the context-id range supported by the MMU family */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		/* MPC8xx has only 16 contexts */
		first_context = 0;
		last_context = 15;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		/* The 47x core supports 16-bit PIDs */
		first_context = 1;
		last_context = 65535;
	} else
#ifdef CONFIG_PPC_BOOK3E_MMU
	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
		/* pid_bits is read from the MMU configuration (elided) */
		first_context = 1;
		last_context = (1UL << (pid_bits + 1)) - 1;
	} else
#endif
	{
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
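	/*
	 * Example: a Book3E core reporting pid_bits = 13 would give
	 * last_context = (1UL << 14) - 1 = 16383.
	 */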
	/* Allocate the maps used by context management */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

	register_cpu_notifier(&mmu_context_cpu_nb);
	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);
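	/*
	 * The byte count matches the three bootmem allocations above: the
	 * context bitmap plus the boot CPU's stale map (2 * CTX_MAP_SIZE)
	 * and the context_mm pointer table.
	 */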
	/* Start handing out ids from the bottom of the range */
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}