target/sparc/helper.c
/*
 * Misc Sparc helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit_restore(cs, ra);
}

void helper_raise_exception(CPUSPARCState *env, int tt)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

void helper_debug(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx)
{
#if !defined(CONFIG_USER_ONLY)
    CPUTimer *timer = opaque;

    if (timer->npt && mem_idx < MMU_KERNEL_IDX) {
        cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC());
    }

    return cpu_tick_get_count(timer);
#else
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks.  */
    return cpu_get_host_ticks();
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif

static target_ulong do_udiv(CPUSPARCState *env, target_ulong a,
                            target_ulong b, int cc, uintptr_t ra)
{
    int overflow = 0;
    uint64_t x0;
    uint32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
    }

    x0 = x0 / x1;
    if (x0 > UINT32_MAX) {
        x0 = UINT32_MAX;
        overflow = 1;
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    return do_udiv(env, a, b, 0, GETPC());
}

target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    return do_udiv(env, a, b, 1, GETPC());
}
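
/*
 * A minimal standalone sketch of the UDIV semantics that do_udiv() above
 * implements: the 64-bit dividend is Y:rs1, the divisor is the low 32 bits
 * of rs2, and a quotient that does not fit in 32 bits saturates to
 * UINT32_MAX with the overflow flag set.  The function and its names are
 * hypothetical, for illustration only; it omits the divide-by-zero trap
 * that the real helper raises.
 */
static inline uint32_t example_udiv(uint32_t y, uint32_t rs1, uint32_t rs2,
                                    int *overflow)
{
    uint64_t dividend = ((uint64_t)y << 32) | rs1;   /* Y:rs1 */
    uint64_t q = dividend / rs2;                     /* caller ensures rs2 != 0 */

    *overflow = q > UINT32_MAX;
    return *overflow ? UINT32_MAX : (uint32_t)q;
}
/* e.g. y=1, rs1=0, rs2=1: quotient 0x100000000 saturates to 0xffffffff, V=1. */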
static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a,
                            target_ulong b, int cc, uintptr_t ra)
{
    int overflow = 0;
    int64_t x0;
    int32_t x1;

    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = (b & 0xffffffff);

    if (x1 == 0) {
        cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
    } else if (x1 == -1 && x0 == INT64_MIN) {
        x0 = INT32_MAX;
        overflow = 1;
    } else {
        x0 = x0 / x1;
        if ((int32_t) x0 != x0) {
            x0 = x0 < 0 ? INT32_MIN : INT32_MAX;
            overflow = 1;
        }
    }

    if (cc) {
        env->cc_dst = x0;
        env->cc_src2 = overflow;
        env->cc_op = CC_OP_DIV;
    }
    return x0;
}

target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    return do_sdiv(env, a, b, 0, GETPC());
}

target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    return do_sdiv(env, a, b, 1, GETPC());
}
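
/*
 * The signed SDIV case, again as a hypothetical illustrative sketch: the
 * Y:rs1 dividend is treated as a signed 64-bit value, INT64_MIN / -1 is
 * special-cased (as in do_sdiv() above), and any quotient outside the
 * int32_t range saturates towards the sign of the true result with the
 * overflow flag set.  The divide-by-zero trap is again omitted.
 */
static inline int32_t example_sdiv(uint32_t y, uint32_t rs1, int32_t rs2,
                                   int *overflow)
{
    int64_t dividend = (int64_t)(((uint64_t)y << 32) | rs1);
    int64_t q;

    if (rs2 == -1 && dividend == INT64_MIN) {
        *overflow = 1;
        return INT32_MAX;
    }
    q = dividend / rs2;                              /* caller ensures rs2 != 0 */
    *overflow = q != (int32_t)q;
    return *overflow ? (q < 0 ? INT32_MIN : INT32_MAX) : (int32_t)q;
}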
#ifdef TARGET_SPARC64
int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
{
    if (b == 0) {
        /* Raise divide by zero trap.  */
        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
    } else if (b == -1) {
        /* Avoid overflow trap with i386 divide insn.  */
        return -a;
    } else {
        return a / b;
    }
}

uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
{
    if (b == 0) {
        /* Raise divide by zero trap.  */
        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
    }
    return a / b;
}
#endif
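
/*
 * Note on the b == -1 special case in helper_sdivx() above: the only value
 * of a for which a / -1 can overflow is INT64_MIN, and that case would raise
 * a hardware trap with the x86 host divide instruction (it is also undefined
 * behaviour in plain C).  Returning -a gives the same quotient for every
 * other value and, under two's-complement wrap-around, maps INT64_MIN back
 * to INT64_MIN.  A hypothetical standalone equivalent that performs the
 * negation in unsigned arithmetic, so it stays well-defined even for
 * INT64_MIN:
 */
static inline int64_t example_sdivx_by_minus_one(int64_t a)
{
    return (int64_t)(0 - (uint64_t)a);   /* two's-complement -a, no trap */
}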
target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
                             target_ulong src2)
{
    target_ulong dst;

    /* Tag overflow occurs if either input has bits 0 or 1 set.  */
    if ((src1 | src2) & 3) {
        goto tag_overflow;
    }

    dst = src1 + src2;

    /* Tag overflow occurs if the addition overflows.  */
    if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
        goto tag_overflow;
    }

    /* Only modify the CC after any exceptions have been generated.  */
    env->cc_op = CC_OP_TADDTV;
    env->cc_src = src1;
    env->cc_src2 = src2;
    env->cc_dst = dst;
    return dst;

 tag_overflow:
    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
}

target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
                             target_ulong src2)
{
    target_ulong dst;

    /* Tag overflow occurs if either input has bits 0 or 1 set.  */
    if ((src1 | src2) & 3) {
        goto tag_overflow;
    }

    dst = src1 - src2;

    /* Tag overflow occurs if the subtraction overflows.  */
    if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
        goto tag_overflow;
    }

    /* Only modify the CC after any exceptions have been generated.  */
    env->cc_op = CC_OP_TSUBTV;
    env->cc_src = src1;
    env->cc_src2 = src2;
    env->cc_dst = dst;
    return dst;

 tag_overflow:
    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
}
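
/*
 * The trap conditions used by helper_taddcctv() and helper_tsubcctv() above,
 * restated as a hypothetical standalone sketch for illustration: TADDccTV
 * and TSUBccTV trap if either operand carries a non-zero tag in bits 1:0, or
 * if the 32-bit signed add/subtract overflows.  Addition overflows when both
 * operands have the same sign but the result does not; subtraction overflows
 * when the operands have different signs and the result's sign differs from
 * src1's.
 */
static inline int example_tadd_traps(uint32_t src1, uint32_t src2)
{
    uint32_t dst = src1 + src2;
    return ((src1 | src2) & 3) || (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31));
}

static inline int example_tsub_traps(uint32_t src1, uint32_t src2)
{
    uint32_t dst = src1 - src2;
    return ((src1 | src2) & 3) || ((src1 ^ src2) & (src1 ^ dst) & (1u << 31));
}
/* e.g. 0x7ffffffc + 4: both tags are clear, but the signed sum overflows
   to 0x80000000, so TADDccTV traps with TT_TOVF. */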
#ifndef TARGET_SPARC64
void helper_power_down(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    env->pc = env->npc;
    env->npc = env->pc + 4;
    cpu_loop_exit(cs);
}
#endif