/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "exec.h"
#include "helpers.h"
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
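/* Table lookup for VTBL/VTBX: each byte of the result is selected from the
   table held in consecutive vector registers starting at env->vfp.regs[rn],
   using the corresponding index byte of ireg.  Indexes >= maxindex take the
   corresponding byte of def instead (the translator is expected to pass the
   old destination for VTBX and zero for VTBL). */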
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;

    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            /* select byte (index & 7) of the 64-bit table entry */
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
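/* The softmmu load/store helpers (for 1, 2, 4 and 8 byte accesses) are
   instantiated by including softmmu_template.h once per access size,
   selected by the SHIFT define (log2 of the access size in bytes). */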
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_arm_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (__builtin_expect(ret, 0)) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception(env->exception_index);
    }
    env = saved_env;
}
#endif
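/* Signed overflow for a + b occurs when the operands have the same sign and
   the result has the opposite sign, i.e. (res ^ a) & SIGNBIT is set while
   (a ^ b) & SIGNBIT is clear.  On overflow the saturated result is
   0x7fffffff for a non-negative operand and 0x80000000 for a negative one,
   which is what ~(((int32_t)a >> 31) ^ SIGNBIT) evaluates to. */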
/* FIXME: Pass an explicit pointer to QF to CPUState, and move saturating
   instructions into helper.c */
uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}
uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
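/* Doubles a signed 32-bit value with saturation (used by QDADD/QDSUB):
   values >= 0x40000000 clamp to the most positive 32-bit result, values
   <= 0xc0000000 clamp to the most negative, and Q is set in either case. */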
uint32_t HELPER(double_saturate)(int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}
uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}
uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
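/* do_ssat() clamps val to the signed range [-(1 << shift), (1 << shift) - 1]
   and do_usat() to the unsigned range [0, (1 << shift) - 1]; both set the
   Q flag when the value had to be clamped. */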
/* Signed saturation.  */
static inline uint32_t do_ssat(int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
/* Unsigned saturation.  */
static inline uint32_t do_usat(int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
/* Signed saturate.  */
uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
{
    return do_ssat(x, shift);
}
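/* The 16-bit variants below saturate each halfword of the operand
   independently and repack the two results. */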
/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat((int16_t)x, shift);
    res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
    return res;
}
/* Unsigned saturate.  */
uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
{
    return do_usat(x, shift);
}
/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat((int16_t)x, shift);
    res |= do_usat(((int32_t)x) >> 16, shift) << 16;
    return res;
}
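/* WFI is implemented by halting the virtual CPU: the pending exception is
   set to EXCP_HLT and execution leaves the CPU loop until an interrupt
   arrives. */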
void HELPER(wfi)(void)
{
    env->exception_index = EXCP_HLT;
    env->halted = 1;
    cpu_loop_exit();
}
void HELPER(exception)(uint32_t excp)
{
    env->exception_index = excp;
    cpu_loop_exit();
}
uint32_t HELPER(cpsr_read)(void)
{
    return cpsr_read(env) & ~CPSR_EXEC;
}
void HELPER(cpsr_write)(uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}
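/* r13 and r14 are banked per processor mode, so the user-mode copies live
   in banked_r13[0]/banked_r14[0].  In FIQ mode r8-r12 are banked as well,
   and the user-mode copies are kept in usr_regs[].  Any other register is
   shared with the current mode and accessed directly in regs[]. */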
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
void HELPER(set_user_reg)(uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */
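/* Flag layout in CPUARMState: NF holds the result (N is its bit 31), ZF
   holds a value that is zero exactly when the Z flag is set, CF is 0 or 1,
   and VF holds signed overflow in bit 31.  For addition, carry-out is
   equivalent to the unsigned result being smaller than an operand, and
   overflow is (a ^ b ^ -1) & (a ^ result) having bit 31 set (operands agree
   in sign, result differs). */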
uint32_t HELPER (add_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a + b;
    env->NF = env->ZF = result;
    env->CF = result < a;
    env->VF = (a ^ b ^ -1) & (a ^ result);
    return result;
}
uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a + b;
        env->CF = result < a;
    } else {
        result = a + b + 1;
        env->CF = result <= a;
    }
    env->VF = (a ^ b ^ -1) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}
uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    result = a - b;
    env->NF = env->ZF = result;
    env->CF = a >= b;
    env->VF = (a ^ b) & (a ^ result);
    return result;
}
uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
{
    uint32_t result;
    if (!env->CF) {
        result = a - b - 1;
        env->CF = a > b;
    } else {
        result = a - b;
        env->CF = a >= b;
    }
    env->VF = (a ^ b) & (a ^ result);
    env->NF = env->ZF = result;
    return result;
}
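/* ARM register-specified shifts use only the bottom byte of the shift
   register, and shift amounts of 32 or more have architecturally defined
   results (0 for LSL/LSR, all-sign-bits for ASR, modulo 32 for ROR).
   C shift counts >= the operand width are undefined behaviour, so the
   out-of-range cases are handled explicitly. */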
/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return x << shift;
}
uint32_t HELPER(shr)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        return 0;
    return (uint32_t)x >> shift;
}
uint32_t HELPER(sar)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32)
        shift = 31;
    return (int32_t)x >> shift;
}
uint32_t HELPER(ror)(uint32_t x, uint32_t i)
{
    int shift = (i & 0xff) & 0x1f;
    if (shift == 0)
        return x;
    return (x >> shift) | (x << (32 - shift));
}
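/* The _cc variants of the shift helpers also update CF with the shifter
   carry-out: the last bit shifted out of the operand, following the ARM
   rules for register-specified shift amounts of zero, 32 and above. */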
uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
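/* 64-bit saturating add/sub for NEON.  Signed overflow detection uses the
   same sign test as the 32-bit helpers, with SIGNBIT64 as the sign bit;
   the saturated value ((int64_t)src1 >> 63) ^ ~SIGNBIT64 is INT64_MAX when
   src1 is non-negative and INT64_MIN when it is negative. */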
uint64_t HELPER(neon_add_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
uint64_t HELPER(neon_add_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res = src1 + src2;
    if (res < src1) {
        env->QF = 1;
        res = ~(uint64_t)0;
    }
    return res;
}
uint64_t HELPER(neon_sub_saturate_s64)(uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        env->QF = 1;
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
uint64_t HELPER(neon_sub_saturate_u64)(uint64_t src1, uint64_t src2)
{
    uint64_t res = src1 - src2;
    if (src1 < src2) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
/* These need to return a pair of values, so still use T0/T1.  */
/* Transpose.  Argument order is rather strange to avoid special casing
   the transposed accesses.  On input T0 = rm, T1 = rd.  On output
   T0 = rd, T1 = rm */
void HELPER(neon_trn_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = ((T0 & 0x00ff00ff) << 8) | (T1 & 0x00ff00ff);
    rm = ((T1 & 0xff00ff00) >> 8) | (T0 & 0xff00ff00);
    T0 = rd;
    T1 = rm;
}
void HELPER(neon_trn_u16)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 << 16) | (T1 & 0xffff);
    rm = (T1 >> 16) | (T0 & 0xffff0000);
    T0 = rd;
    T1 = rm;
}
/* Worker routines for zip and unzip.  */
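/* Each worker operates on one 32-bit chunk of the vector at a time: unzip
   gathers the even-numbered elements of the T0/T1 pair into T0 and the
   odd-numbered elements into T1, while zip interleaves the elements of
   T0 and T1. */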
void HELPER(neon_unzip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T0 >> 8) & 0xff00)
         | ((T1 << 16) & 0xff0000) | ((T1 << 8) & 0xff000000);
    rm = ((T0 >> 8) & 0xff) | ((T0 >> 16) & 0xff00)
         | ((T1 << 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
}
void HELPER(neon_zip_u8)(void)
{
    uint32_t rd;
    uint32_t rm;
    rd = (T0 & 0xff) | ((T1 << 8) & 0xff00)
         | ((T0 << 8) & 0xff0000) | ((T1 << 16) & 0xff000000);
    rm = ((T0 >> 16) & 0xff) | ((T1 >> 8) & 0xff00)
         | ((T0 >> 8) & 0xff0000) | (T1 & 0xff000000);
    T0 = rd;
    T1 = rm;
}
void HELPER(neon_zip_u16)(void)
{
    uint32_t tmp;

    tmp = (T0 & 0xffff) | (T1 << 16);
    T1 = (T1 & 0xffff0000) | (T0 >> 16);
    T0 = tmp;
}