target/m68k/op_helper.c

/*
 *  M68K helper routines
 *
 *  Copyright (c) 2007 CodeSourcery
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/semihost.h"

#if defined(CONFIG_USER_ONLY)

void m68k_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
}

#else

/* Try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    int ret;

    ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
    if (unlikely(ret)) {
        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}
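
/*
 * RTE as implemented here pops a ColdFire-style exception frame: a
 * format/status longword at SP (format and stack alignment in the top
 * nibble, vector number and SR/CCR in the lower bits) followed by the
 * return PC at SP + 4.  Bits 28-29 of the saved word restore any
 * misalignment of the pre-exception stack pointer.
 */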
static void do_rte(CPUM68KState *env)
{
    uint32_t sp;
    uint32_t fmt;

    sp = env->aregs[7];
    fmt = cpu_ldl_kernel(env, sp);
    env->pc = cpu_ldl_kernel(env, sp + 4);
    sp |= (fmt >> 28) & 3;
    env->aregs[7] = sp + 8;

    helper_set_sr(env, fmt);
}
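
/*
 * Common exception/interrupt entry: build the frame described above
 * (format/vector/SR longword plus the return PC), switch to the
 * supervisor stack, push the frame, and fetch the new PC from the
 * vector table at VBR + vector.  For hardware interrupts the interrupt
 * mask in SR is set to the pending level and the M bit is cleared.
 */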
static void do_interrupt_all(CPUM68KState *env, int is_hw)
{
    CPUState *cs = CPU(m68k_env_get_cpu(env));
    uint32_t sp;
    uint32_t fmt;
    uint32_t retaddr;
    uint32_t vector;

    fmt = 0;
    retaddr = env->pc;

    if (!is_hw) {
        switch (cs->exception_index) {
        case EXCP_RTE:
            /* Return from an exception.  */
            do_rte(env);
            return;
        case EXCP_HALT_INSN:
            if (semihosting_enabled()
                && (env->sr & SR_S) != 0
                && (env->pc & 3) == 0
                && cpu_lduw_code(env, env->pc - 4) == 0x4e71
                && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
                env->pc += 4;
                do_m68k_semihosting(env, env->dregs[0]);
                return;
            }
            cs->halted = 1;
            cs->exception_index = EXCP_HLT;
            cpu_loop_exit(cs);
            return;
        }
        if (cs->exception_index >= EXCP_TRAP0
            && cs->exception_index <= EXCP_TRAP15) {
            /* Move the PC after the trap instruction.  */
            retaddr += 2;
        }
    }

    vector = cs->exception_index << 2;

    fmt |= 0x40000000;
    fmt |= vector << 16;
    fmt |= env->sr;
    fmt |= cpu_m68k_get_ccr(env);

    env->sr |= SR_S;
    if (is_hw) {
        env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
        env->sr &= ~SR_M;
    }
    m68k_switch_sp(env);
    sp = env->aregs[7];
    fmt |= (sp & 3) << 28;

    /* ??? This could cause MMU faults.  */
    sp &= ~3;
    sp -= 4;
    cpu_stl_kernel(env, sp, retaddr);
    sp -= 4;
    cpu_stl_kernel(env, sp, fmt);
    env->aregs[7] = sp;
    /* Jump to vector.  */
    env->pc = cpu_ldl_kernel(env, env->vbr + vector);
}

void m68k_cpu_do_interrupt(CPUState *cs)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    do_interrupt_all(env, 0);
}

static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
{
    do_interrupt_all(env, 1);
}

#endif

bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    M68kCPU *cpu = M68K_CPU(cs);
    CPUM68KState *env = &cpu->env;

    if (interrupt_request & CPU_INTERRUPT_HARD
        && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
        /* Real hardware gets the interrupt vector via an IACK cycle
           at this point.  Current emulated hardware doesn't rely on
           this, so we provide/save the vector when the interrupt is
           first signalled.  */
        cs->exception_index = env->pending_vector;
        do_interrupt_m68k_hardirq(env);
        return true;
    }
    return false;
}

static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr)
{
    CPUState *cs = CPU(m68k_env_get_cpu(env));

    cs->exception_index = tt;
    cpu_loop_exit_restore(cs, raddr);
}

static void raise_exception(CPUM68KState *env, int tt)
{
    raise_exception_ra(env, tt, 0);
}

void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
{
    raise_exception(env, tt);
}
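
/*
 * DIVU.W <ea>,Dn: the 32-bit dividend in Dn is divided by a 16-bit
 * divisor; the 16-bit quotient is written to the low word of Dn and
 * the 16-bit remainder to the high word (e.g. 100000 / 7 gives
 * quotient 0x37cd, remainder 5).  If the quotient does not fit in 16
 * bits, only the condition codes are updated and Dn is left unchanged.
 */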
void HELPER(divuw)(CPUM68KState *env, int destr, uint32_t den)
{
    uint32_t num = env->dregs[destr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot > 0xffff) {
        env->cc_v = -1;
        /* real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}

void HELPER(divsw)(CPUM68KState *env, int destr, int32_t den)
{
    int32_t num = env->dregs[destr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot != (int16_t)quot) {
        env->cc_v = -1;
        /* nothing else is modified */
        /* real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->dregs[destr] = deposit32(quot, 16, 16, rem);
    env->cc_z = (int16_t)quot;
    env->cc_n = (int16_t)quot;
    env->cc_v = 0;
}
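
/*
 * DIVU.L/DIVS.L with a 32-bit dividend: Dq (numr) receives the
 * quotient and Dr (regr) the remainder.  On ColdFire ISA_A only one of
 * the two results is written back: the quotient when Dq and Dr name
 * the same register, otherwise the remainder.
 */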
void HELPER(divul)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
    uint32_t num = env->dregs[numr];
    uint32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0;
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
        if (numr == regr) {
            env->dregs[numr] = quot;
        } else {
            env->dregs[regr] = rem;
        }
    } else {
        env->dregs[regr] = rem;
        env->dregs[numr] = quot;
    }
}

void HELPER(divsl)(CPUM68KState *env, int numr, int regr, int32_t den)
{
    int32_t num = env->dregs[numr];
    int32_t quot, rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0;
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    if (m68k_feature(env, M68K_FEATURE_CF_ISA_A)) {
        if (numr == regr) {
            env->dregs[numr] = quot;
        } else {
            env->dregs[regr] = rem;
        }
    } else {
        env->dregs[regr] = rem;
        env->dregs[numr] = quot;
    }
}
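
/*
 * 64-bit dividend forms (e.g. DIVU.L <ea>,Dr:Dq): the dividend is
 * Dr:Dq with Dr holding the high 32 bits, the quotient goes to Dq
 * (numr) and the remainder to Dr (regr).  On overflow only the
 * condition codes are changed and both registers keep their values.
 */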
void HELPER(divull)(CPUM68KState *env, int numr, int regr, uint32_t den)
{
    uint64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    uint64_t quot;
    uint32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot > 0xffffffffULL) {
        env->cc_v = -1;
        /* real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same, the quotient is returned.
     * therefore we set Dq last.
     */
    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}

void HELPER(divsll)(CPUM68KState *env, int numr, int regr, int32_t den)
{
    int64_t num = deposit64(env->dregs[numr], 32, 32, env->dregs[regr]);
    int64_t quot;
    int32_t rem;

    if (den == 0) {
        raise_exception_ra(env, EXCP_DIV0, GETPC());
    }
    quot = num / den;
    rem = num % den;

    env->cc_c = 0; /* always cleared, even if overflow */
    if (quot != (int32_t)quot) {
        env->cc_v = -1;
        /* real 68040 keeps N and unset Z on overflow,
         * whereas documentation says "undefined"
         */
        env->cc_z = 1;
        return;
    }
    env->cc_z = quot;
    env->cc_n = quot;
    env->cc_v = 0;

    /*
     * If Dq and Dr are the same, the quotient is returned.
     * therefore we set Dq last.
     */
    env->dregs[regr] = rem;
    env->dregs[numr] = quot;
}
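
/*
 * CAS2.W Dc1:Dc2,Du1:Du2,(Rn1):(Rn2).  The four data register numbers
 * are packed into 'regs' as decoded by the extract32() calls below
 * (Dc1 in bits 11:9, Dc2 in 8:6, Du1 in 5:3, Du2 in 2:0).  Both memory
 * words are compared against Dc1/Dc2 and only if both match are
 * Du1/Du2 stored back; the loaded values are then written to Dc1/Dc2.
 */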
void HELPER(cas2w)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    int16_t c1 = env->dregs[Dc1];
    int16_t c2 = env->dregs[Dc2];
    int16_t u1 = env->dregs[Du1];
    int16_t u2 = env->dregs[Du2];
    int16_t l1, l2;
    uintptr_t ra = GETPC();

    if (parallel_cpus) {
        /* Tell the main loop we need to serialize this insn.  */
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
    } else {
        /* We're executing in a serial context -- no need to be atomic.  */
        l1 = cpu_lduw_data_ra(env, a1, ra);
        l2 = cpu_lduw_data_ra(env, a2, ra);
        if (l1 == c1 && l2 == c2) {
            cpu_stw_data_ra(env, a1, u1, ra);
            cpu_stw_data_ra(env, a2, u2, ra);
        }
    }

    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPW;
    env->dregs[Dc1] = deposit32(env->dregs[Dc1], 0, 16, l1);
    env->dregs[Dc2] = deposit32(env->dregs[Dc2], 0, 16, l2);
}
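
/*
 * CAS2.L, with the same register packing as CAS2.W above.  When the
 * two longwords are adjacent and 8-byte aligned they can be updated
 * with a single 64-bit compare-and-swap; otherwise a parallel context
 * has to fall back to serializing the instruction.
 */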
void HELPER(cas2l)(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2)
{
    uint32_t Dc1 = extract32(regs, 9, 3);
    uint32_t Dc2 = extract32(regs, 6, 3);
    uint32_t Du1 = extract32(regs, 3, 3);
    uint32_t Du2 = extract32(regs, 0, 3);
    uint32_t c1 = env->dregs[Dc1];
    uint32_t c2 = env->dregs[Dc2];
    uint32_t u1 = env->dregs[Du1];
    uint32_t u2 = env->dregs[Du2];
    uint32_t l1, l2;
    uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64) && !defined(CONFIG_USER_ONLY)
    int mmu_idx = cpu_mmu_index(env, 0);
    TCGMemOpIdx oi;
#endif

    if (parallel_cpus) {
        /* We're executing in a parallel context -- must be atomic.  */
#ifdef CONFIG_ATOMIC64
        uint64_t c, u, l;
        if ((a1 & 7) == 0 && a2 == a1 + 4) {
            c = deposit64(c2, 32, 32, c1);
            u = deposit64(u2, 32, 32, u1);
#ifdef CONFIG_USER_ONLY
            l = helper_atomic_cmpxchgq_be(env, a1, c, u);
#else
            oi = make_memop_idx(MO_BEQ, mmu_idx);
            l = helper_atomic_cmpxchgq_be_mmu(env, a1, c, u, oi, ra);
#endif
            l1 = l >> 32;
            l2 = l;
        } else if ((a2 & 7) == 0 && a1 == a2 + 4) {
            c = deposit64(c1, 32, 32, c2);
            u = deposit64(u1, 32, 32, u2);
#ifdef CONFIG_USER_ONLY
            l = helper_atomic_cmpxchgq_be(env, a2, c, u);
#else
            oi = make_memop_idx(MO_BEQ, mmu_idx);
            l = helper_atomic_cmpxchgq_be_mmu(env, a2, c, u, oi, ra);
#endif
            l2 = l >> 32;
            l1 = l;
        } else
#endif
        {
            /* Tell the main loop we need to serialize this insn.  */
            cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
        }
    } else {
        /* We're executing in a serial context -- no need to be atomic.  */
        l1 = cpu_ldl_data_ra(env, a1, ra);
        l2 = cpu_ldl_data_ra(env, a2, ra);
        if (l1 == c1 && l2 == c2) {
            cpu_stl_data_ra(env, a1, u1, ra);
            cpu_stl_data_ra(env, a2, u2, ra);
        }
    }

    if (c1 != l1) {
        env->cc_n = l1;
        env->cc_v = c1;
    } else {
        env->cc_n = l2;
        env->cc_v = c2;
    }
    env->cc_op = CC_OP_CMPL;
    env->dregs[Dc1] = l1;
    env->dregs[Dc2] = l2;
}
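
/*
 * Bitfield helpers for memory operands (BFEXTU/BFEXTS/BFINS/BFCHG/
 * BFCLR/BFSET/BFFFO with an <ea>{offset:width} operand).  A field is
 * at most 32 bits wide but may span up to 5 bytes of memory, so the
 * approach is to load up to 8 bytes containing the field into a
 * big-endian-justified 64-bit word, operate on that word, and store
 * it back for the modifying operations.  bf_prep() normalizes the
 * address, bit offset and length into that form.
 */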
struct bf_data {
    uint32_t addr;
    uint32_t bofs;
    uint32_t blen;
    uint32_t len;
};

static struct bf_data bf_prep(uint32_t addr, int32_t ofs, uint32_t len)
{
    int bofs, blen;

    /* Bound length; map 0 to 32.  */
    len = ((len - 1) & 31) + 1;

    /* Note that ofs is signed.  */
    addr += ofs / 8;
    bofs = ofs % 8;
    if (bofs < 0) {
        bofs += 8;
        addr -= 1;
    }

    /* Compute the number of bytes required (minus one) to
       satisfy the bitfield.  */
    blen = (bofs + len - 1) / 8;

    /* Canonicalize the bit offset for data loaded into a 64-bit big-endian
       word.  For the cases where BLEN is not a power of 2, adjust ADDR so
       that we can use the next power of two sized load without crossing a
       page boundary, unless the field itself crosses the boundary.  */
    switch (blen) {
    case 0:
        bofs += 56;
        break;
    case 1:
        bofs += 48;
        break;
    case 2:
        if (addr & 1) {
            bofs += 8;
            addr -= 1;
        }
        /* fallthru */
    case 3:
        bofs += 32;
        break;
    case 4:
        if (addr & 3) {
            bofs += 8 * (addr & 3);
            addr &= -4;
        }
        break;
    default:
        g_assert_not_reached();
    }

    return (struct bf_data){
        .addr = addr,
        .bofs = bofs,
        .blen = blen,
        .len = len,
    };
}
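
/*
 * BLEN from bf_prep() is "bytes required minus one"; it selects the
 * narrowest access that covers the field: 0 -> byte, 1 -> word,
 * 2 or 3 -> longword, 4 -> quadword.
 */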
static uint64_t bf_load(CPUM68KState *env, uint32_t addr, int blen,
                        uintptr_t ra)
{
    switch (blen) {
    case 0:
        return cpu_ldub_data_ra(env, addr, ra);
    case 1:
        return cpu_lduw_data_ra(env, addr, ra);
    case 2:
    case 3:
        return cpu_ldl_data_ra(env, addr, ra);
    case 4:
        return cpu_ldq_data_ra(env, addr, ra);
    default:
        g_assert_not_reached();
    }
}

static void bf_store(CPUM68KState *env, uint32_t addr, int blen,
                     uint64_t data, uintptr_t ra)
{
    switch (blen) {
    case 0:
        cpu_stb_data_ra(env, addr, data, ra);
        break;
    case 1:
        cpu_stw_data_ra(env, addr, data, ra);
        break;
    case 2:
    case 3:
        cpu_stl_data_ra(env, addr, data, ra);
        break;
    case 4:
        cpu_stq_data_ra(env, addr, data, ra);
        break;
    default:
        g_assert_not_reached();
    }
}
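
/*
 * BFEXTS/BFEXTU from memory.  bf_prep()/bf_load() leave the field
 * left-justified once shifted by the bit offset, so extraction is a
 * shift left by bofs followed by a signed (BFEXTS) or unsigned
 * (BFEXTU) shift right by 64 - len.
 */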
uint32_t HELPER(bfexts_mem)(CPUM68KState *env, uint32_t addr,
                            int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);

    return (int64_t)(data << d.bofs) >> (64 - d.len);
}

uint64_t HELPER(bfextu_mem)(CPUM68KState *env, uint32_t addr,
                            int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);

    /* Put CC_N at the top of the high word; put the zero-extended value
       at the bottom of the low word.  */
    data <<= d.bofs;
    data >>= 64 - d.len;
    data |= data << (64 - d.len);

    return data;
}
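
/*
 * BFINS: build a mask of 'len' bits aligned with the field
 * (-1ull << (64 - len) >> bofs), clear those bits in the loaded word
 * and merge in the new value shifted into the same position.
 */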
uint32_t HELPER(bfins_mem)(CPUM68KState *env, uint32_t addr, uint32_t val,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    data = (data & ~mask) | (((uint64_t)val << (64 - d.len)) >> d.bofs);

    bf_store(env, d.addr, d.blen, data, ra);

    /* The field at the top of the word is also CC_N for CC_OP_LOGIC.  */
    return val << (32 - d.len);
}

uint32_t HELPER(bfchg_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data ^ mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}

uint32_t HELPER(bfclr_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data & ~mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}

uint32_t HELPER(bfset_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;

    bf_store(env, d.addr, d.blen, data | mask, ra);

    return ((data & mask) << d.bofs) >> 32;
}
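
/*
 * BFFFO: find the first set bit of the field, scanning from the most
 * significant end.  The result is the field offset plus the bit number
 * of the first one bit, or offset + width when the field is all zeros;
 * e.g. a field {offset:4, width:8} holding 0x10 yields 4 + 3 = 7.
 */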
uint32_t HELPER(bfffo_reg)(uint32_t n, uint32_t ofs, uint32_t len)
{
    return (n ? clz32(n) : len) + ofs;
}

uint64_t HELPER(bfffo_mem)(CPUM68KState *env, uint32_t addr,
                           int32_t ofs, uint32_t len)
{
    uintptr_t ra = GETPC();
    struct bf_data d = bf_prep(addr, ofs, len);
    uint64_t data = bf_load(env, d.addr, d.blen, ra);
    uint64_t mask = -1ull << (64 - d.len) >> d.bofs;
    uint64_t n = (data & mask) << d.bofs;
    uint32_t ffo = helper_bfffo_reg(n >> 32, ofs, d.len);

    /* Return FFO in the low word and N in the high word.
       Note that because of MASK and the shift, the low word
       is already zero.  */
    return n | ffo;
}