11 #include <linux/types.h>
15 #include <linux/kernel.h>
17 #include <linux/signal.h>
18 #include <linux/sched.h>
25 #include <asm/cputype.h>
/*
 * Fragment of vfp_state_in_hw() -- the signature and surrounding lines are
 * elided in this chunk (embedded numbering jumps 67 -> 70; not contiguous).
 * Visible logic: compares the state's recorded owner CPU against @cpu, and
 * reports whether vfp_current_hw_state[cpu] points at this thread's
 * vfpstate, i.e. whether the hardware currently holds this thread's VFP
 * context.  NOTE(review): the early-return between these two statements is
 * not visible here -- confirm against the full file.
 */
67 if (thread->vfpstate.hard.
cpu != cpu)
70 return vfp_current_hw_state[
cpu] == &thread->vfpstate;
/*
 * Fragment (presumably of vfp_force_reload(); enclosing signature elided).
 * If this thread's VFP state is the one loaded in hardware for @cpu, drop
 * the cached ownership pointer so the state will be reloaded from memory
 * on next use.  NOTE(review): line 81 (between 80 and 82) is elided --
 * likely additional invalidation work; verify against the full file.
 */
80 if (vfp_state_in_hw(cpu, thread)) {
82 vfp_current_hw_state[
cpu] =
NULL;
/*
 * Fragment of vfp_thread_flush() -- lines 93..105 of the body are elided
 * (embedded numbering jumps 92 -> 106).  Visible logic: if the hardware
 * currently holds @thread's VFP state (locals `cpu` and `vfp` are defined
 * in the elided part), forget that ownership so stale state is not
 * written back for a flushed thread.
 */
92 static void vfp_thread_flush(
struct thread_info *thread)
106 if (vfp_current_hw_state[cpu] == vfp)
107 vfp_current_hw_state[
cpu] =
NULL;
/*
 * Fragment of vfp_thread_exit() -- lines 121..122 and 124..125 elided.
 * On thread exit, if the per-CPU hardware-state cache still points at the
 * exiting thread's vfpstate, clear it so a dead thread's state is never
 * saved or restored.  `cpu` is defined in the elided portion -- presumably
 * the current CPU; confirm against the full file.
 */
120 static void vfp_thread_exit(
struct thread_info *thread)
123 union vfp_state *vfp = &thread->vfpstate;
126 if (vfp_current_hw_state[cpu] == vfp)
127 vfp_current_hw_state[
cpu] =
NULL;
/*
 * Fragment of vfp_thread_copy() -- lines 132..135 elided.  Visible logic:
 * struct-copies the parent's entire VFP register state into the child at
 * fork/clone time.  `parent` is obtained in the elided portion --
 * presumably from `current_thread_info()`; TODO confirm.
 */
131 static void vfp_thread_copy(
struct thread_info *thread)
136 thread->vfpstate = parent->vfpstate;
/*
 * Fragment of the thread-notifier callback (vfp_notifier, per line 213's
 * initializer) -- large gaps between cases are elided.  Dispatches on the
 * thread lifecycle event:
 *   SWITCH: visible test checks whether VFP is enabled (FPEXC_EN) and a
 *           state is loaded on this CPU -- presumably to decide whether
 *           the outgoing context must be handled; body elided.
 *   FLUSH / EXIT / COPY: delegate to the matching helper above.
 * NOTE(review): the surrounding switch statement, `break`s, and the
 * function's signature/return are not visible here.
 */
174 case THREAD_NOTIFY_SWITCH:
185 if ((fpexc &
FPEXC_EN) && vfp_current_hw_state[cpu])
196 case THREAD_NOTIFY_FLUSH:
197 vfp_thread_flush(thread);
200 case THREAD_NOTIFY_EXIT:
201 vfp_thread_exit(thread);
204 case THREAD_NOTIFY_COPY:
205 vfp_thread_copy(thread);
213 .notifier_call = vfp_notifier,
/*
 * Fragment of vfp_raise_sigfpe() -- delivers a SIGFPE with code @sicode
 * to the current task for the faulting context @regs.  Visible logic:
 * zeroes a siginfo structure and clears current->thread.error_code; the
 * siginfo field assignments and the actual signal delivery call (lines
 * 225..233, 235+) are elided.
 */
220 static void vfp_raise_sigfpe(
unsigned int sicode,
struct pt_regs *
regs)
224 memset(&info, 0,
sizeof(info));
234 current->thread.error_code = 0;
/*
 * Fragment of vfp_panic() -- dumps diagnostic state when the VFP support
 * code hits an unrecoverable condition (@reason) on instruction @inst.
 * Visible logic: logs the reason, the exception/control registers, and
 * then all 32 single-precision registers two per line.  The pr_err
 * argument lists for lines 245/248 continue on elided lines, as does the
 * loop body reading the register values.
 */
240 static void vfp_panic(
char *
reason,
u32 inst)
244 pr_err(
"VFP: Error: %s\n", reason);
245 pr_err(
"VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
247 for (i = 0; i < 32; i += 2)
248 pr_err(
"VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
/*
 * Fragment of vfp_raise_exceptions() -- converts a mask of VFP exception
 * bits (@exceptions) raised while bouncing @inst into signals for the
 * current task.  Visible logic:
 *  - a sentinel/unhandled case logs via vfp_panic("unhandled bounce") and
 *    raises SIGFPE with code 0 (the guarding condition at ~line 261 is
 *    elided);
 *  - the RAISE(stat,en,sig) helper macro maps an exception status bit,
 *    gated by its FPSCR enable bit, to a si_code (macro body continues on
 *    elided lines -- note the visible part is an unbraced `if`, completed
 *    in the elided continuation);
 *  - finally delivers the accumulated si_code via vfp_raise_sigfpe().
 */
255 static void vfp_raise_exceptions(
u32 exceptions,
u32 inst,
u32 fpscr,
struct pt_regs *regs)
259 pr_debug(
"VFP: raising exceptions %08x\n", exceptions);
262 vfp_panic(
"unhandled bounce", inst);
263 vfp_raise_sigfpe(0, regs);
279 #define RAISE(stat,en,sig) \
280 if (exceptions & stat && fpscr & en) \
293 vfp_raise_sigfpe(si_code, regs);
/*
 * Fragment of vfp_emulate_instruction() -- software-emulates the bounced
 * VFP instruction @inst under control register value @fpscr and returns a
 * mask of exception flags the emulation raised (u32).  Only the signature
 * and the entry trace are visible; the opcode decode/dispatch body is
 * elided.
 */
299 static u32 vfp_emulate_instruction(
u32 inst,
u32 fpscr,
struct pt_regs *regs)
303 pr_debug(
"VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);
/*
 * Fragment of the VFP bounce handler (function signature elided; locals
 * suggest it receives `trigger` and `fpexc`).  Called when the hardware
 * refuses ("bounces") a VFP instruction to the support code.  Visible
 * logic:
 *  - snapshots FPSCR state into fpscr/orig_fpscr (reads elided);
 *  - a CONFIG_CPU_FEROCEON-conditional section (contents elided) --
 *    presumably an erratum workaround; confirm against the full file;
 *  - if FPEXC has EX or the VV bit set, takes the exception-state path
 *    (body largely elided);
 *  - emulates the trigger instruction, first against the working fpscr
 *    and later (line 427) against orig_fpscr -- presumably the retry of
 *    the next instruction after handling the first exception; any raised
 *    exception flags are turned into signals via vfp_raise_exceptions().
 * NOTE(review): ~90 original lines are elided inside this function; the
 * control flow connecting the two emulate/raise pairs is not visible.
 */
337 u32 fpscr, orig_fpscr, fpsid, exceptions;
339 pr_debug(
"VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
371 #ifndef CONFIG_CPU_FEROCEON
394 if (fpexc & (FPEXC_EX |
FPEXC_VV)) {
408 exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
410 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
427 exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
429 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
/*
 * Fragment of vfp_enable() -- per-CPU callback (void * argument unused,
 * matching the on_each_cpu() callback shape) that reads the coprocessor
 * access register; the elided continuation presumably enables VFP
 * coprocessor access (cp10/cp11) and writes it back -- TODO confirm.
 */
434 static void vfp_enable(
void *
unused)
439 access = get_copro_access();
/*
 * Fragment of vfp_pm_suspend() -- saves VFP state across a power-down.
 * Visible logic: one branch (condition elided, lines 449..454) logs that
 * it is saving state; otherwise, if some other thread's state is resident
 * on this CPU (`ti` defined in elided code -- presumably
 * current_thread_info()), the cached ownership is dropped at line 469 so
 * the state is treated as lost across suspend.  The save itself and the
 * return value (int) are in elided lines.
 */
448 static int vfp_pm_suspend(
void)
455 pr_debug(
"%s: saving vfp state\n", __func__);
460 }
else if (vfp_current_hw_state[ti->
cpu]) {
469 vfp_current_hw_state[ti->
cpu] =
NULL;
/*
 * Fragment of vfp_pm_resume() -- body entirely elided.  Presumably
 * re-enables VFP hardware access after resume (counterpart to
 * vfp_pm_suspend above); confirm against the full file.
 */
474 static void vfp_pm_resume(
void)
/*
 * Fragment of vfp_cpu_pm_notifier() -- only part of the parameter list is
 * visible (the trailing `void *` argument and the whole body are elided).
 * Shape matches a CPU PM notifier callback dispatching on @cmd;
 * presumably it calls the suspend/resume helpers above -- TODO confirm.
 */
483 static int vfp_cpu_pm_notifier(
struct notifier_block *
self,
unsigned long cmd,
/*
 * Two configuration variants of vfp_pm_init() -- the real one (line 502,
 * body elided; presumably registers the PM notifiers above) and the
 * inline no-op stub (line 508) for configs without power-management
 * support.  The #if/#else/#endif lines separating them are elided.
 */
502 static void vfp_pm_init(
void)
508 static inline void vfp_pm_init(
void) { }
/*
 * Two isolated statements from the hardware-state sync/flush paths (their
 * enclosing function signatures at ~515 and ~530 are elided): line 519
 * guards work on whether this thread's state is live in hardware; line
 * 538 forces the state to be reloaded from memory via vfp_force_reload().
 * NOTE(review): these belong to two different functions -- do not read
 * them as sequential code.
 */
519 if (vfp_state_in_hw(cpu, thread)) {
538 vfp_force_reload(cpu, thread);
/*
 * Fragment of vfp_init() -- the __init probe/registration routine (most
 * of its ~70 original lines are elided).  Visible logic:
 *  - continues a boot-banner line: either "no double precision support"
 *    or the implementor/architecture/part/variant/rev details (pr_cont
 *    argument list continues on elided lines);
 *  - registers vfp_notifier_block for thread lifecycle events (line 691);
 *  - feature probing: the CPUID architecture-field test at line 720
 *    gates reads of the MVFR1 media/VFP feature register -- the masked
 *    comparisons at 722/726 check specific feature fields (presumably
 *    half-precision and NEON-related capabilities; the bodies selecting
 *    hwcaps are elided -- confirm against the full file).
 */
654 static int __init vfp_init(
void)
677 pr_cont(
"no double precision support\n");
682 pr_cont(
"implementor %02x architecture %d part %02x variant %x rev %x\n",
691 thread_register_notifier(&vfp_notifier_block);
720 if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
722 if ((
fmrx(
MVFR1) & 0x000fff00) == 0x00011100)
726 if ((
fmrx(
MVFR1) & 0xf0000000) == 0x10000000)