stab.c (Linux Kernel 3.7.1)
/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <[email protected]>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/memblock.h>

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>

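/*
 * One 16-byte hardware segment table entry (STE): make_ste() below packs
 * the ESID and the STE_ESID_V/KP/KS flag bits into esid_data, and the
 * VSID (shifted by STE_VSID_SHIFT) into vsid_data.
 */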
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};

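/*
 * Per-cpu log of the user STEs created since the last context switch.
 * __ste_allocate() records the table index of each user entry here so
 * that switch_stab() can invalidate exactly those entries; if more than
 * NR_STAB_CACHE_ENTRIES are created, stab_cache_ptr is left past the
 * array size as an overflow marker and the whole table is swept instead.
 */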
#define NR_STAB_CACHE_ENTRIES 8
static DEFINE_PER_CPU(long, stab_cache_ptr);
static DEFINE_PER_CPU(long [NR_STAB_CACHE_ENTRIES], stab_cache);

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;

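	/*
	 * Both kinds of segment get the problem-state key (Kp); only user
	 * segments also get the supervisor-state key (Ks). The key bits
	 * combine with the per-page PP bits to determine access rights.
	 */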
	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;

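	/*
	 * The table is HW_PAGE_SIZE bytes of 16-byte STEs: 32 groups of
	 * 8 entries. The primary group is indexed by the low five bits
	 * of the ESID (each group is 8 * 16 = 128 bytes, hence the << 7)
	 * and the secondary group by their complement; global_entry is
	 * the group's first entry number (group * 8). For example, esid
	 * 0x21 hashes to the primary group at stab + 0x80 and the
	 * secondary group at stab + 0xf00.
	 */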
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				ste->vsid_data = vsid_data;
				eieio();
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry, so pick one with a round-robin
	 * selection. Search all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment. */
		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;
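	/*
	 * stab_rr lives in the paca so the castout cursor persists across
	 * calls; the PAGE_OFFSET test above skips the bolted entry for the
	 * first kernel segment, which maps the segment table itself and
	 * must never be evicted.
	 */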

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	eieio();				/* Order update */
	castout_ste->esid_data = esid_data;

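	/*
	 * The old translation may still be cached in the SLB, which acts
	 * as a cache of segment table entries on these CPUs, so the stale
	 * ESID must be invalidated explicitly.
	 */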
	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}
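/*
 * The value make_ste() returns is the new entry's index into the table
 * (group base | slot within the group); __ste_allocate() records it in
 * stab_cache so switch_stab() can invalidate the entry without a scan.
 */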

/*
 * Allocate a segment table entry for the given ea and mm.
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (is_kernel_addr(ea)) {
		vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (!is_kernel_addr(ea)) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}

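/*
 * Entry point for segment faults on the current task: __ste_allocate()
 * returns 0 once an STE is installed, or 1 for an address that cannot
 * be mapped (at or above TASK_SIZE_USER64, or no mm to derive a VSID
 * from), leaving the caller to treat the access as a fault. The call
 * site is outside this file.
 */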
int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task.
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause a STAB miss,
	 * which would update the stab_cache/stab_cache_ptr per-cpu variables.
	 */
	hard_irq_disable();

	offset = __get_cpu_var(stab_cache_ptr);
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;
			ea = ste->esid_data & ESID_MASK;
			if (!is_kernel_addr(ea)) {
				ste->esid_data = 0;
			}
		}
	}

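	/* Flush the SLB as well: it may hold cached copies of the STEs
	 * that were just invalidated. */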
	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

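	/*
	 * Preload the segments the task is most likely to touch first:
	 * the one it executes in, the one holding its stack and the base
	 * of the mmap region. The GET_ESID() comparisons below skip the
	 * duplicates when two of these land in the same segment.
	 */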
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/*
 * Allocate segment tables for secondary CPUs. These must all go in
 * the first (bolted) segment, so that do_stab_bolted won't get a
 * recursive segment miss on the segment table itself.
 */
void __init stabs_alloc(void)
{
	int cpu;

	if (mmu_has_feature(MMU_FTR_SLB))
		return;

	for_each_possible_cpu(cpu) {
		unsigned long newstab;

		if (cpu == 0)
			continue; /* stab for CPU 0 is statically allocated */

		/* Allocate below 1<<SID_SHIFT so the table lies in the
		 * first (bolted) segment. */
		newstab = memblock_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
					      1<<SID_SHIFT);
		newstab = (unsigned long)__va(newstab);

		memset((void *)newstab, 0, HW_PAGE_SIZE);

		paca[cpu].stab_addr = newstab;
		paca[cpu].stab_real = __pa(newstab);
		printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
		       "virtual, 0x%llx absolute\n",
		       cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
	}
}

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB. All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET, MMU_SEGSIZE_256M);
	unsigned long stabreal;

	asm volatile("isync; slbia; isync":::"memory");
	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);

	/* Order update */
	asm volatile("sync":::"memory");

	/* Set the ASR: the real address of the segment table, with the
	 * low-order valid bit set. */
	stabreal = get_paca()->stab_real | 0x1ul;

	mtspr(SPRN_ASR, stabreal);
}