/* target-s390x/helper.c */
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
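/* Timer callbacks: mark the corresponding interrupt as pending and kick
   the CPU with a hard interrupt so it gets delivered. */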
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}

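/* Create a CPU, set its id property, and realize it.  On any failure the
   error is propagated to the caller and the half-built object is released. */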
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

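/* Without a real MMU, every fault in user-only mode is reported back to
   cpu_loop() as an addressing exception. */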
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

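/* Handle a TLB miss for system emulation: translate the address through
   the MMU, check the result against the RAM size, and install the page
   mapping.  Returns 0 on success, 1 if a program exception was triggered
   (the caller must then exit the TB so it can be delivered). */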
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

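/* Debug helpers (gdbstub/monitor): walk the MMU without raising
   exceptions to turn a virtual address into a physical one. */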
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

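/* Install a new PSW pair.  Side effects: the condition code is unpacked
   for TCG, watchpoints are recomputed when the PER bit changes, and a
   wait PSW halts the CPU (requesting system shutdown if this leaves no
   CPU running). */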
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

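/* Build the architectural PSW mask, folding the lazily computed TCG
   condition code back into its place in the mask. */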
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

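/* Map this CPU's lowcore (at the prefix address) into host memory so
   interrupt delivery can access the old/new PSW slots and save areas
   directly; aborts the CPU if the mapping fails. */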
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

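/* Deliver a restart interrupt: save the current PSW into the restart-old
   slot of the lowcore and load the restart-new PSW. */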
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

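/* Deliver a program interrupt: determine the instruction length if it was
   deferred, store code/ilen and the old PSW in the lowcore, fold in any
   pending PER event, and branch to the program-new PSW. */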
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

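/* Deliver a supervisor-call interrupt, then chase any pending PER event
   with a program interrupt immediately afterwards. */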
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
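/* Deliver one queued external interrupt and pop it from the queue; the
   pending flag is cleared once the queue drains. */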
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

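/* Scan the per-ISC I/O interrupt queues.  The first queued interrupt
   whose ISC is enabled in CR6 is delivered; INTERRUPT_IO remains pending
   as long as any queue still holds an entry. */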
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

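/* Deliver a machine check.  Only CRW-type checks are supported: the
   architected register save areas and a fixed interruption code are
   stored in the lowcore before loading the mcck-new PSW. */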
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

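/* Top-level interrupt dispatcher: convert the pending flags into an
   exception class (respecting the PSW interrupt masks), then hand off to
   the class-specific delivery routine above. */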
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

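/* Callback from the cpu-exec loop: deliver a pending hard interrupt if
   the PSW currently has external interrupts enabled. */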
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

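/* Rebuild the CPU watchpoints that emulate PER storage-alteration events
   from the current CR9/CR10/CR11 contents. */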
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

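/* Debug exception handler: convert a hit storage watchpoint into a PER
   storage-alteration event and restart the instruction stream. */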
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */