Linux Kernel 3.7.1
stackframe.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
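/*
 * STATMASK covers the Status bits the macros below are allowed to
 * rewrite. On MIPS32/64-style CPUs, 0x1f is IE (bit 0), EXL (bit 1),
 * ERL (bit 2) and KSU (bits 4:3); the SMTC value 0x1e omits IE so the
 * global enable is never touched, per the comment above. On
 * R3000-class CPUs, 0x3f instead covers the two-deep
 * KUc/IEc/KUp/IEp/KUo/IEo mode/interrupt stack in bits 5:0.
 */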

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

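/*
 * SAVE_AT stores $1, the assembler temporary. ".set noat" keeps the
 * assembler from silently expanding anything in this region into a
 * sequence that uses $1 and would clobber the value being saved.
 */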
        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
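        /*
         * Each mflhxu pops one slot off the SmartMIPS accumulator
         * (v1 <- LO, then HI and ACX shift down one place), so the
         * three reads above yield LO, HI and ACX in that order.
         */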
#else
        mfhi    v1
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
        LONG_S  v1, PT_HI(sp)
        mflo    v1
#endif
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
        LONG_S  v1, PT_LO(sp)
#endif
        .endm

        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT   19      /* TCBIND */
#define CPU_ID_REG CP0_TCBIND
#define CPU_ID_MFC0 mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT   48      /* XCONTEXT */
#define CPU_ID_REG CP0_XCONTEXT
#define CPU_ID_MFC0 MFC0
#else
#define PTEBASE_SHIFT   23      /* CONTEXT */
#define CPU_ID_REG CP0_CONTEXT
#define CPU_ID_MFC0 MFC0
#endif
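/*
 * kernelsp[] holds one saved kernel stack pointer per CPU. The trap
 * setup code elsewhere stores the CPU (or TC) number into the chosen
 * coprocessor 0 register pre-shifted, so that the LONG_SRL by
 * PTEBASE_SHIFT below leaves cpu_id * sizeof(long): the byte offset
 * of this CPU's kernelsp[] slot. (For TCBIND no store is needed; the
 * CurTC field sits at bit 21, so shifting right by 19 likewise
 * yields TC# * 4.)
 */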
        .macro  get_saved_sp    /* SMP variation */
        CPU_ID_MFC0     k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL        k0, PTEBASE_SHIFT
        LONG_ADDU       k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        CPU_ID_MFC0     \temp, CPU_ID_REG
        LONG_SRL        \temp, PTEBASE_SHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
#else
        .macro  get_saved_sp    /* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
        /*
         * Clear BTB (branch target buffer), forbid RAS (return address
         * stack) to workaround the Out-of-order Issue in Loongson2F
         * via its diagnostic register.
         */
        move    k0, ra
        jal     1f
         nop
1:      jal     1f
         nop
1:      jal     1f
         nop
1:      jal     1f
         nop
1:      move    ra, k0
        li      k0, 3
        mtc0    k0, $22
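        /*
         * The four chained jal/nop pairs walk the Loongson2F's
         * four-entry return address stack so every RAS entry points
         * at known in-line code; the diagnostic write (li 3 /
         * mtc0 $22) then performs the BTB clear / RAS inhibit
         * described in the comment above. ra is parked in k0 across
         * the sequence because each jal clobbers it.
         */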
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif

        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
        mfc0    k0, CP0_STATUS
        sll     k0, 3           /* extract cu0 bit */
        .set    noreorder
        bltz    k0, 8f
         move   k1, sp
        .set    reorder
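        /*
         * Status.CU0 is bit 28; shifting left by 3 moves it into the
         * sign bit, so bltz branches when CU0 is set, i.e. when the
         * exception came from kernel mode (CU0 doubles as the
         * "running on the kernel stack" flag, see CLI/STI below).
         */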
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0,
         * but the FPU emulator and gdb remote debug stub
         * need it to operate correctly
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Ideally, these instructions would be shuffled in
         * to cover the pipeline delay.
         */
        .set    mips32
        mfc0    k0, CP0_TCSTATUS
        .set    mips0
        LONG_S  k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_S  $4, PT_R4(sp)
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_STATUS(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $6, PT_R6(sp)
        LONG_S  $7, PT_R7(sp)
        LONG_S  v1, PT_CAUSE(sp)
        MFC0    v1, CP0_EPC
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
        LONG_S  v1, PT_EPC(sp)
        ori     $28, sp, _THREAD_MASK
        xori    $28, _THREAD_MASK
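        /*
         * The ori/xori pair rounds sp down to the THREAD_SIZE-aligned
         * stack base, where struct thread_info lives: $28 (gp) now
         * points at the current thread_info. Or-then-xor clears the
         * low bits without needing a scratch register for the mask.
         */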
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        .set    mips64
        pref    0, 0($28)       /* Prefetch the current pointer */
        pref    0, PT_R31(sp)   /* Prefetch the $31(ra) */
        /* The Octeon multiplier state is affected by general multiply
           instructions. It must be saved before kernel code might
           corrupt it */
        jal     octeon_mult_save
        LONG_L  v1, 0($28)      /* Load the current pointer */
        /* Restore $31(ra) that was changed by the jal */
        LONG_L  ra, PT_R31(sp)
        pref    0, 0(v1)        /* Prefetch the current thread */
#endif
        .set    pop
        .endm

        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm

        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm
249 
250  .macro RESTORE_TEMP
251 #ifdef CONFIG_CPU_HAS_SMARTMIPS
252  LONG_L $24, PT_ACX(sp)
253  mtlhx $24
254  LONG_L $24, PT_HI(sp)
255  mtlhx $24
256  LONG_L $24, PT_LO(sp)
257  mtlhx $24
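        /*
         * mtlhx pushes onto the accumulator (ACX <- HI, HI <- LO,
         * LO <- $24), so writing ACX, HI and LO in that order leaves
         * each value back in its own slot.
         */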
#else
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm

        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, 0xff00
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
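        /*
         * The sequence above merges two Status images: a0 keeps only
         * the live interrupt mask (IM bits, 0xff00), v0 takes the
         * saved PT_STATUS with its IM field stripped (v1 = ~0xff00),
         * and the or combines them. The restored Status thus uses
         * the saved mode/exception bits but the current interrupt
         * mask, which interrupt code may have changed since the
         * exception was taken.
         */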
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7,  PT_R7(sp)
        LONG_L  $6,  PT_R6(sp)
        LONG_L  $5,  PT_R5(sp)
        LONG_L  $4,  PT_R4(sp)
        LONG_L  $3,  PT_R3(sp)
        LONG_L  $2,  PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
         rfe
        .set    pop
        .endm

#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
#ifdef CONFIG_MIPS_MT_SMTC
        .set    mips32r2
        /*
         * We need to make sure the read-modify-write
         * of Status below isn't perturbed by an interrupt
         * or cross-TC access, so we need to do at least a DMT,
         * protected by an interrupt-inhibit. But setting IXMT
         * also creates a few-cycle window where an IPI could
         * be queued and not be detected before potentially
         * returning to a WAIT or user-mode loop. It must be
         * replayed.
         *
         * We're in the middle of a context switch, and
         * we can't dispatch it directly without trashing
         * some registers, so we'll try to detect this unlikely
         * case and program a software interrupt in the VPE,
         * as would be done for a cross-VPE IPI. To accommodate
         * the handling of that case, we're doing a DVPE instead
         * of just a DMT here to protect against other threads.
         * This is a lot of cruft to cover a tiny window.
         * If you can find a better design, implement it!
         */
        mfc0    v0, CP0_TCSTATUS
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DVPE    5                       # dvpe a1
        jal     mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* Restore the Octeon multiplier state */
        jal     octeon_mult_restore
#endif
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, 0xff00
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
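        /* Same IM-field merge as the R3000 variant above: keep the
           live interrupt mask, take everything else from PT_STATUS. */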
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Only after EXL/ERL have been restored to status can we
 * restore TCStatus.IXMT.
 */
        LONG_L  v1, PT_TCSTATUS(sp)
        _ehb
        mfc0    a0, CP0_TCSTATUS
        andi    v1, TCSTATUS_IXMT
        bnez    v1, 0f

/*
 * We'd like to detect any IPIs queued in the tiny window
 * above and request a software interrupt to service them
 * when we ERET.
 *
 * Computing the offset into the IPIQ array of the executing
 * TC's IPI queue in-line would be tedious. We use part of
 * the TCContext register to hold 16 bits of offset that we
 * can add in-line to find the queue head.
 */
        mfc0    v0, CP0_TCCONTEXT
        la      a2, IPIQ
        srl     v0, v0, 16
        addu    a2, a2, v0
        LONG_L  v0, 0(a2)
        beqz    v0, 0f
/*
 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
 */
        mfc0    v0, CP0_CAUSE
        ori     v0, v0, C_SW1
        mtc0    v0, CP0_CAUSE
0:
        /*
         * This test should really never branch but
         * let's be prudent here. Having atomized
         * the shared register modifications, we can
         * now EVPE, and must do so before interrupts
         * are potentially re-enabled.
         */
        andi    a1, a1, MVPCONTROL_EVP
        beqz    a1, 1f
        evpe
1:
        /* We know that TCStatus.IXMT should be set from above */
        xori    a0, a0, TCSTATUS_IXMT
        or      a0, a0, v1
        mtc0    a0, CP0_TCSTATUS
        _ehb

        .set    mips0
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7,  PT_R7(sp)
        LONG_L  $6,  PT_R6(sp)
        LONG_L  $5,  PT_R5(sp)
        LONG_L  $4,  PT_R4(sp)
        LONG_L  $3,  PT_R3(sp)
        LONG_L  $2,  PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    mips3
        eret
        .set    mips0
        .endm

#endif

        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm

        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm


/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
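        /*
         * or-then-xor is a masked write: the or forces CU0 and all
         * STATMASK bits high, the xor then flips the STATMASK bits
         * back to zero. Net effect with STATMASK 0x1f: CU0 set,
         * IE/EXL/ERL cleared, KSU = 0 (kernel mode), every other
         * Status bit untouched.
         */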
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and disable interrupts only for the
         * current TC, using the TCStatus register.
         */
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU, leave IXMT */
        xori    t0, 0x00001800
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL...*/
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL | ST0_ERL
        xori    t0, ST0_EXL | ST0_ERL
        mtc0    t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm

/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  STI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
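        /*
         * Same masked write as CLI, but the xor mask omits bit 0,
         * so IE is forced high by the or and left set: interrupts
         * enabled, EXL/ERL cleared, kernel mode.
         */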
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and enable interrupts only for the
         * current TC, using the TCStatus register.
         */
        _ehb
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU *and* IXMT */
        xori    t0, 0x00001c00
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL...*/
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL
        xori    t0, ST0_EXL
        mtc0    t0, CP0_STATUS
        /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_enable_hazard
        .endm

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  KMODE
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * This gets baroque in SMTC. We want to
         * protect the non-atomic clearing of EXL
         * with DMT/EMT, but we don't want to take
         * an interrupt while DMT is still in effect.
         */

        /* KMODE gets invoked from both reorder and noreorder code */
        .set    push
        .set    mips32r2
        .set    noreorder
        mfc0    v0, CP0_TCSTATUS
        andi    v1, v0, TCSTATUS_IXMT   /* remember previous IXMT state */
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DMT     2                       # dmt   v0
        /*
         * We don't know a priori if ra is "live"
         */
        move    t0, ra
        jal     mips_ihb
         nop    /* delay slot */
        move    ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
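        /*
         * On R3000-class CPUs the previous interrupt enable lives in
         * IEp (bit 2); copying it down two places to IEc (bit 0)
         * before the masked write below preserves the pre-exception
         * interrupt state, as promised in the comment above the macro.
         */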
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        _ehb
        andi    v0, v0, VPECONTROL_TE   /* only EMT if TE was set before the DMT */
        beqz    v0, 2f
         nop    /* delay slot */
        emt
2:
        mfc0    v0, CP0_TCSTATUS
        /* Clear IXMT, then OR in previous value */
        ori     v0, TCSTATUS_IXMT
        xori    v0, TCSTATUS_IXMT
        or      v0, v1, v0
        mtc0    v0, CP0_TCSTATUS
        /*
         * irq_disable_hazard below should expand to EHB
         * on 24K/34K CPUS
         */
        .set    pop
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm

#endif /* _ASM_STACKFRAME_H */