/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
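/*
 * Timer callbacks for the TOD clock comparator and the CPU timer.
 * Both simply flag the corresponding external interruption as pending
 * and kick the CPU; the interruption itself is delivered later via
 * s390_cpu_do_interrupt().
 */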
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
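
/*
 * CPU creation is split in two steps: cpu_s390x_create() instantiates the
 * QOM object, and s390x_new_cpu() additionally sets the CPU id and realizes
 * it.  A sketch of a hypothetical caller in machine init code (names are
 * illustrative, not taken from this file):
 *
 *     Error *err = NULL;
 *     S390CPU *cpu = s390x_new_cpu(machine->cpu_model, 0, &err);
 *     if (err) {
 *         error_report_err(err);
 *     }
 */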
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU id for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Be sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
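
/*
 * TLB fill handler for system emulation: translate the virtual address,
 * check that the result lies within guest RAM and, on success, install
 * the mapping in the QEMU TLB.  Returns 0 on success; returns 1 when a
 * program exception has been triggered and the caller must deliver it.
 */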
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* Check for out-of-RAM accesses (raddr == ram_size is already the
       first byte beyond the end of RAM, hence ">=").  */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
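
/*
 * Install a new PSW.  Besides updating address and mask, this extracts the
 * condition code for TCG, recomputes the PER watchpoints when the PER bit
 * flips, and halts the CPU when the wait bit is set; if no CPU remains
 * running, the machine is shut down.
 */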
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);

        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
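
/*
 * Return the current PSW mask with the condition-code field refreshed
 * from TCG's lazily computed cc state.
 */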
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);
        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
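
/*
 * Map/unmap the lowcore (prefix) page, through which old and new PSWs and
 * interruption parameters are exchanged with the guest.  Mapping only
 * fails if the prefix register points at unbacked memory, which is fatal
 * for the CPU.
 */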
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
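
/*
 * Deliver a program interruption: determine the instruction length code
 * (possibly by decoding the current instruction), merge in a pending PER
 * event, store the old PSW and interruption data into the lowcore, and
 * load the program-new PSW.
 */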
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
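
/*
 * Pop the most recently queued external interruption from the per-CPU
 * queue and deliver it through the lowcore.  The INTERRUPT_EXT flag is
 * dropped once the queue is empty.
 */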
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
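
/*
 * Scan the per-ISC I/O interruption queues and deliver the first queued
 * interruption whose ISC is enabled in CR6.  The INTERRUPT_IO flag is
 * only cleared once no queued interruptions remain.
 */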
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
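
/*
 * Deliver a channel-report-word machine check: save the architected
 * register state into the lowcore save areas, store a fixed machine-check
 * interruption code and load the mcck-new PSW.  Only CRW-pending machine
 * checks (type 1) are implemented.
 */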
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
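
/*
 * Top-level interrupt dispatcher.  Pending interruptions are considered
 * in priority order (machine check, then external, then I/O) and only
 * when the corresponding PSW mask bit permits them; program and SVC
 * exceptions arrive with cs->exception_index already set.
 */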
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            /* clear the CPU-timer bit here, not the TOD bit */
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
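
/*
 * Rebuild the QEMU watchpoints that emulate the PER storage-alteration
 * event.  CR10/CR11 delimit the monitored address range; since a single
 * QEMU watchpoint can neither wrap around nor cover the full 64-bit
 * address space, the range is split in two where necessary.
 */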
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just consider it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */