/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/cputype.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"
/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;
union vfp_state *last_VFP_context[NR_CPUS];
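/*
 * Note on the scheme above: last_VFP_context[cpu] points at the
 * vfp_state of the thread whose registers currently live in that CPU's
 * VFP register file, or NULL if the hardware state must be reloaded
 * before use. Saving and restoring are done lazily: state is written
 * back only when another thread (or another CPU) actually needs the
 * hardware.
 */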
/*
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds the VFP architecture version.
 */
unsigned int VFP_arch;
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu;

        memset(vfp, 0, sizeof(union vfp_state));

        vfp->hard.fpexc = FPEXC_EN;
        vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

        /*
         * Disable VFP to ensure we initialize it first.  We must ensure
         * that the modification of last_VFP_context[] and the hardware
         * disable are done on the same CPU and without preemption.
         */
        cpu = get_cpu();
        if (last_VFP_context[cpu] == vfp)
                last_VFP_context[cpu] = NULL;
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        put_cpu();
}
static void vfp_thread_exit(struct thread_info *thread)
{
        /* release case: per-thread VFP cleanup. */
        union vfp_state *vfp = &thread->vfpstate;
        unsigned int cpu = get_cpu();

        if (last_VFP_context[cpu] == vfp)
                last_VFP_context[cpu] = NULL;
        put_cpu();
}
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if CONFIG_TREE_PREEMPT_RCU is enabled, so
 *     it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *     v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *     but may change at any time.
 *   - we could be preempted if CONFIG_TREE_PREEMPT_RCU is enabled, so
 *     it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
        struct thread_info *thread = v;

        if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
                u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
                unsigned int cpu = thread->cpu;

                /*
                 * On SMP, if VFP is enabled, save the old state in
                 * case the thread migrates to a different CPU. The
                 * restoring is done lazily.
                 */
                if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
                        vfp_save_state(last_VFP_context[cpu], fpexc);
                        last_VFP_context[cpu]->hard.cpu = cpu;
                }

                /*
                 * Thread migration, just force the reloading of the
                 * state on the new CPU in case the VFP registers
                 * contain stale data.
                 */
                if (thread->vfpstate.hard.cpu != cpu)
                        last_VFP_context[cpu] = NULL;
#endif

                /*
                 * Always disable VFP so we can lazily save/restore the
                 * old state.
                 */
                fmxr(FPEXC, fpexc & ~FPEXC_EN);
                return NOTIFY_DONE;
        }

        if (cmd == THREAD_NOTIFY_FLUSH)
                vfp_thread_flush(thread);
        else            /* THREAD_NOTIFY_EXIT */
                vfp_thread_exit(thread);

        return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
        .notifier_call  = vfp_notifier,
};
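/*
 * Illustrative migration scenario for the hard.cpu check above: thread
 * T runs on CPU0 and its registers are saved lazily at switch-out
 * (hard.cpu = 0). T then migrates to CPU1. When T is switched in on
 * CPU1, hard.cpu (0) differs from the local CPU (1), so
 * last_VFP_context[1] is cleared and T's first VFP instruction traps
 * into vfp_support_entry, which reloads the state saved on CPU0.
 */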
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
        siginfo_t info;

        memset(&info, 0, sizeof(info));

        info.si_signo = SIGFPE;
        info.si_code = sicode;
        info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

        /*
         * This is the same as NWFPE, because it's not clear what
         * this is used for.
         */
        current->thread.error_code = 0;
        current->thread.trap_no = 6;

        send_sig_info(SIGFPE, &info, current);
}
static void vfp_panic(char *reason, u32 inst)
{
        int i;

        printk(KERN_ERR "VFP: Error: %s\n", reason);
        printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
                fmrx(FPEXC), fmrx(FPSCR), inst);
        for (i = 0; i < 32; i += 2)
                printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
                       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}
/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
        int si_code = 0;

        pr_debug("VFP: raising exceptions %08x\n", exceptions);

        if (exceptions == VFP_EXCEPTION_ERROR) {
                vfp_panic("unhandled bounce", inst);
                vfp_raise_sigfpe(0, regs);
                return;
        }

        /*
         * If any of the status flags are set, update the FPSCR.
         * Comparison instructions always return at least one of
         * these flags set.
         */
        if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
                fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

        fpscr |= exceptions;

        fmxr(FPSCR, fpscr);

#define RAISE(stat, en, sig)                            \
        if (exceptions & stat && fpscr & en)            \
                si_code = sig;
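/*
 * Example (illustrative): RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV)
 * expands to
 *
 *      if (exceptions & FPSCR_DZC && fpscr & FPSCR_DZE)
 *              si_code = FPE_FLTDIV;
 *
 * i.e. a signal code is chosen only when the exception occurred
 * (cumulative flag set) and the corresponding trap is enabled.
 */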
        /*
         * These are arranged in priority order, lowest to highest.
         */
        RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
        RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
        RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
        RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
        RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

        if (si_code)
                vfp_raise_sigfpe(si_code, regs);
}
/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
        u32 exceptions = VFP_EXCEPTION_ERROR;

        pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

        if (INST_CPRTDO(inst)) {
                if (!INST_CPRT(inst)) {
                        /*
                         * CPDO
                         */
                        if (vfp_single(inst))
                                exceptions = vfp_single_cpdo(inst, fpscr);
                        else
                                exceptions = vfp_double_cpdo(inst, fpscr);
                } else {
                        /*
                         * A CPRT instruction cannot appear in FPINST2, nor
                         * can it cause an exception.  Therefore, we do not
                         * have to emulate it.
                         */
                }
        } else {
                /*
                 * A CPDT instruction cannot appear in FPINST2, nor can
                 * it cause an exception.  Therefore, we do not have to
                 * emulate it.
                 */
        }

        return exceptions & ~VFP_NAN_FLAG;
}
/*
 * Package up a bounce condition.
 */
void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
        u32 fpscr, orig_fpscr, fpsid, exceptions;

        pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

        /*
         * At this point, FPEXC can have the following configuration:
         *
         *  EX DEX IXE
         *  0   1   x   - synchronous exception
         *  1   x   0   - asynchronous exception
         *  1   x   1   - synchronous on VFP subarch 1 and asynchronous
         *                on later subarchs
         *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
         *                implementation), undefined otherwise
         *
         * Clear various bits and enable access to the VFP so we can
         * handle the bounce.
         */
        fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));

        fpsid = fmrx(FPSID);
        orig_fpscr = fpscr = fmrx(FPSCR);

        /*
         * Check for the special VFP subarch 1 and FPSCR.IXE bit case.
         */
        if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
            && (fpscr & FPSCR_IXE)) {
                /*
                 * Synchronous exception, emulate the trigger instruction.
                 */
                goto emulate;
        }

        if (fpexc & FPEXC_EX) {
#ifndef CONFIG_CPU_FEROCEON
                /*
                 * Asynchronous exception. The instruction is read from
                 * FPINST and the interrupted instruction has to be
                 * restarted.
                 */
                trigger = fmrx(FPINST);
                regs->ARM_pc -= 4;
#endif
        } else if (!(fpexc & FPEXC_DEX)) {
                /*
                 * Illegal combination of bits. It can be caused by an
                 * unallocated VFP instruction but with FPSCR.IXE set
                 * and not on VFP subarch 1.
                 */
                vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
                goto exit;
        }

        /*
         * Modify fpscr to indicate the number of iterations remaining.
         * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
         * whether FPEXC.VECITR or FPSCR.LEN is used.
         */
        if (fpexc & (FPEXC_EX | FPEXC_VV)) {
                u32 len;

                len = fpexc + (1 << FPEXC_LENGTH_BIT);

                fpscr &= ~FPSCR_LENGTH_MASK;
                fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
        }
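        /*
         * Illustrative arithmetic, assuming FPEXC_LENGTH_BIT == 8 and
         * FPSCR_LENGTH_BIT == 16 as in the ARM VFP headers: the add
         * above increments the vector-length field in place, the mask
         * isolates it, and the shift by (16 - 8) bits moves it into
         * FPSCR's LEN position before emulation.
         */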
        /*
         * Handle the first FP instruction.  We used to take note of the
         * FPEXC bounce reason, but this appears to be unreliable.
         * Emulate the bounced instruction instead.
         */
        exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);

        /*
         * If there isn't a second FP instruction, exit now. Note that
         * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
         */
        if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
                goto exit;

        /*
         * The barrier() here prevents fpinst2 being read
         * before the condition above.
         */
        barrier();
        trigger = fmrx(FPINST2);

 emulate:
        exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
        if (exceptions)
                vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
        preempt_enable();
}
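/*
 * Note (assumed from the exception path in entry.S): VFP_bounce() is
 * entered with preemption already disabled, which is why the function
 * ends with preempt_enable() rather than a balanced disable/enable
 * pair of its own.
 */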
static void vfp_enable(void *unused)
{
        u32 access = get_copro_access();

        /*
         * Enable full access to VFP (cp10 and cp11).
         */
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}
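/*
 * Background: the VFP and Advanced SIMD register files are accessed
 * through coprocessors 10 and 11, and CPACC_FULL() sets both CPACR
 * access bits for a coprocessor, granting user- and kernel-mode
 * access.
 */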
#ifdef CONFIG_PM
#include <linux/sysdev.h>

static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
{
        struct thread_info *ti = current_thread_info();
        u32 fpexc = fmrx(FPEXC);

        /* if vfp is on, then save state for resumption */
        if (fpexc & FPEXC_EN) {
                printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
                vfp_save_state(&ti->vfpstate, fpexc);

                /* disable, just in case */
                fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
        }

        /* clear any information we had about last context state */
        memset(last_VFP_context, 0, sizeof(last_VFP_context));

        return 0;
}

static int vfp_pm_resume(struct sys_device *dev)
{
        /* ensure we have access to the vfp */
        vfp_enable(NULL);

        /* and disable it to ensure the next usage restores the state */
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

        return 0;
}

static struct sysdev_class vfp_pm_sysclass = {
        .name           = "vfp",
        .suspend        = vfp_pm_suspend,
        .resume         = vfp_pm_resume,
};

static struct sys_device vfp_pm_sysdev = {
        .cls    = &vfp_pm_sysclass,
};

static void vfp_pm_init(void)
{
        sysdev_class_register(&vfp_pm_sysclass);
        sysdev_register(&vfp_pm_sysdev);
}

#else
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
void vfp_sync_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        /*
         * If the thread we're interested in is the current owner of the
         * hardware VFP state, then we need to save its state.
         */
        if (last_VFP_context[cpu] == &thread->vfpstate) {
                u32 fpexc = fmrx(FPEXC);

                /*
                 * Save the last VFP state on this CPU.
                 */
                fmxr(FPEXC, fpexc | FPEXC_EN);
                vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
                fmxr(FPEXC, fpexc);
        }

        put_cpu();
}
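/*
 * vfp_sync_hwstate() above and vfp_flush_hwstate() below exist for
 * callers (such as the ptrace register accessors) that need the
 * in-memory vfpstate to be coherent with the hardware: sync writes
 * the live registers back to memory, flush forces a reload from
 * memory on the thread's next VFP use.
 */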
void vfp_flush_hwstate(struct thread_info *thread)
{
        unsigned int cpu = get_cpu();

        /*
         * If the thread we're interested in is the current owner of the
         * hardware VFP state, then we need to invalidate that state.
         */
        if (last_VFP_context[cpu] == &thread->vfpstate) {
                u32 fpexc = fmrx(FPEXC);

                fmxr(FPEXC, fpexc & ~FPEXC_EN);

                /*
                 * Set the context to NULL to force a reload the next time
                 * the thread uses the VFP.
                 */
                last_VFP_context[cpu] = NULL;
        }

#ifdef CONFIG_SMP
        /*
         * For SMP we still have to take care of the case where the thread
         * migrates to another CPU and then back to the original CPU on which
         * the last VFP user is still the same thread. Mark the thread VFP
         * state as belonging to a non-existent CPU so that the saved one will
         * be reloaded in the above case.
         */
        thread->vfpstate.hard.cpu = NR_CPUS;
#endif
        put_cpu();
}
#include <linux/smp.h>

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
        unsigned int vfpsid;
        unsigned int cpu_arch = cpu_architecture();

        if (cpu_arch >= CPU_ARCH_ARMv6)
                vfp_enable(NULL);

        /*
         * First check that there is a VFP that we can use.
         * The handler is already set up to just log calls, so
         * we just need to read the VFPSID register.
         */
        vfp_vector = vfp_testing_entry;
        barrier();
        vfpsid = fmrx(FPSID);
        barrier();
        vfp_vector = vfp_null_entry;

        printk(KERN_INFO "VFP support v0.3: ");
        if (VFP_arch)
                printk("not present\n");
        else if (vfpsid & FPSID_NODOUBLE) {
                printk("no double precision support\n");
        } else {
                smp_call_function(vfp_enable, NULL, 1);

                /* Extract the architecture version */
                VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
                printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
                        (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
                        (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
                        (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
                        (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
                        (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

                vfp_vector = vfp_support_entry;

                thread_register_notifier(&vfp_notifier_block);
                vfp_pm_init();

                /*
                 * We detected VFP, and the support code is
                 * in place; report VFP support to userspace.
                 */
                elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
                if (VFP_arch >= 2) {
                        elf_hwcap |= HWCAP_VFPv3;

                        /*
                         * Check for VFPv3 D16. CPUs in this configuration
                         * only have 16 x 64-bit registers.
                         */
                        if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
                                elf_hwcap |= HWCAP_VFPv3D16;
                }
#endif
#ifdef CONFIG_NEON
                /*
                 * Check for the presence of the Advanced SIMD
                 * load/store instructions, integer and single
                 * precision floating point operations. Only check
                 * for NEON if the hardware has the MVFR registers.
                 */
                if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                        if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
                                elf_hwcap |= HWCAP_NEON;
                }
#endif
        }
        return 0;
}

late_initcall(vfp_init);