target-alpha/vax_helper.c
/*
 *  Helpers for vax floating point instructions.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"

#define FP_STATUS (env->fp_status)


/* F floating (VAX) */
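/*
 * Note on the conversions below (derived from the code itself, added here
 * for orientation): the IEEE and VAX formats share the sign/exponent/
 * fraction ordering, so converting between softfloat values and the
 * register images mostly means moving the fraction into place and
 * offsetting the exponent field by 2 -- one because the VAX bias (128 for
 * F, 1024 for G) exceeds the IEEE bias (127/1023) by one, and one more
 * because VAX normalizes fractions to 0.5 <= f < 1 where IEEE uses
 * 1 <= f < 2.  IEEE values with no VAX equivalent (NaN, infinity,
 * exponent overflow) are encoded as the reserved "dirty zero" pattern 1.
 */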
static uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Adjust the exponent bias and keep the fraction */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory(uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f(uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x00003fff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        /* Replicate the complement of the exponent MSB into bits 61:59 */
        r |= 0x7ll << 59;
    }
    return r;
}

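/*
 * Field mapping implemented by the two shuffles above (read off the shifts
 * and masks; it corresponds to the LDF/STF register image):
 *
 *   memory <15:14>  <->  register <63:62>   sign, exponent MSB
 *   memory <13:0>   <->  register <58:45>   exponent <6:0>, fraction <22:16>
 *   memory <31:16>  <->  register <44:29>   fraction <15:0>
 *
 * On the memory-to-register path, bits <61:59> are filled with the
 * complement of the exponent MSB and bits <28:0> are left zero.
 */
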
/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

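/*
 * Rough sketch of how these helpers fit together (illustrative only; the
 * real callers are in the generated TCG code, and "longword" below simply
 * stands for the 32-bit value an LDF fetched from memory):
 *
 *     uint64_t vb = helper_memory_to_f(longword);   // LDF: memory -> register image
 *     uint64_t vc = helper_addf(env, va, vb);       // ADDF: arithmetic on register images
 *     uint32_t m  = helper_f_to_memory(vc);         // STF: register image -> memory
 *
 * i.e. each arithmetic helper converts its operands to an IEEE softfloat
 * type, performs the IEEE operation, and converts the result back.
 */
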
uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(env, GETPC(), a);
    fb = f_to_float32(env, GETPC(), b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(env, GETPC(), t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
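/*
 * The G register image used below is the VAX G memory format with its four
 * 16-bit words reversed (see helper_memory_to_g further down): sign in bit
 * 63, an 11-bit exponent in bits 62:52 and the fraction in bits 51:0.  The
 * field widths match IEEE double precision, so float64_to_g/g_to_float64
 * only have to apply the same +/-2 exponent offset used for F floating
 * above.
 */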
static uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Adjust the exponent bias and keep the fraction */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory(uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g(uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

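/*
 * Both directions above are the same operation: the four 16-bit words of
 * the quadword are reversed (word 0 <-> word 3, word 1 <-> word 2), e.g.
 *
 *     0x1111222233334444  ->  0x4444333322221111
 *
 * The transformation is its own inverse, which is why helper_g_to_memory
 * and helper_memory_to_g share the same body.
 */
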
uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(env, GETPC(), a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}

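/*
 * Comparison helpers.  As with the hardware CMPGxx instructions, a true
 * relation yields the nonzero pattern 0x4000000000000000 (the architecture
 * describes the true result as the value 2.0) and a false relation yields
 * a true zero.
 */
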
uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_le(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(env, GETPC(), a);
    fb = g_to_float64(env, GETPC(), b);

    if (float64_lt(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

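/*
 * Integer <-> VAX float conversions.  helper_cvtgq converts with
 * truncation toward zero (hence the _round_to_zero softfloat variant
 * below); the other conversions round according to the rounding mode
 * currently set in FP_STATUS.
 */
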
uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(env, GETPC(), a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
{
    float64 fa = g_to_float64(env, GETPC(), a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}