[qemu/ar7.git] / target-s390x / helper.c
/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
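/* Timer callbacks: mark the corresponding external interrupt as pending
   and kick the CPU so that it reevaluates its interrupt state. */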
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

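/* CPU creation helpers: cpu_s390x_create() merely instantiates the CPU
   object; s390x_new_cpu() also sets its id property and realizes it,
   cleaning up on failure. */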
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    return S390_CPU(object_new(TYPE_S390_CPU));
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

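/* For linux-user, there is no lowcore to deliver interrupts through; any
   MMU fault is reported as an addressing exception for cpu_loop() to
   pick up. */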
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Make sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

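/* Translate a virtual address through the MMU and install the resulting
   mapping in the QEMU TLB.  Returns 0 on success and 1 if a program
   exception has been triggered. */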
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check for out-of-RAM accesses */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

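/* Debug (gdbstub) address translation helpers: translate without side
   effects, i.e. no exceptions and no TLB fills. */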
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

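/* Install a new PSW.  This updates the condition code kept by TCG,
   recomputes the PER watchpoints when the PER bit changes, and halts the
   CPU when the wait bit is set (if no CPU is left running, a system
   shutdown is requested). */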
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

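/* Return the PSW mask with the current condition code folded in; under
   TCG the cc is computed lazily from cc_op/cc_src/cc_dst/cc_vr. */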
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

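/* Map the prefix area (lowcore) at env->psa for direct access.  Aborts
   if the mapping cannot cover the whole LowCore structure. */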
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

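/* The delivery helpers below all follow the same PSW-swap pattern: store
   the old PSW and the interruption parameters into the lowcore, fetch the
   new PSW from the lowcore, and load it. */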
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

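/* Deliver a program interruption: determine the instruction length code,
   signal any pending PER event along with the exception, and swap PSWs
   through the lowcore program-interrupt fields. */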
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

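/* Deliver a SERVICE CALL interruption through the lowcore svc fields. */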
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

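/* Deliver the most recently queued external interrupt and clear the
   pending flag once the queue is drained. */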
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

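/* Scan the per-ISC I/O interrupt queues, deliver an interrupt from the
   first subclass enabled in CR6, and keep INTERRUPT_IO pending as long
   as any queue still holds entries. */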
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

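/* Deliver a channel-report-word machine check: fill the architected
   register save areas and a fixed interruption code in the lowcore,
   then swap PSWs. */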
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

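/* Top-level interrupt dispatcher: pick a pending interrupt class that is
   enabled in the current PSW and route it to its delivery helper. */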
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

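/* cpu-exec hook; returns true when an interrupt has been delivered.
   Note that only the external-interrupt mask is checked here before
   dispatching. */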
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

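/* Model PER storage-alteration events with QEMU watchpoints covering the
   CR10..CR11 address range; called whenever the PER configuration may
   have changed. */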
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

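/* Debug exception hook: turn a storage-alteration watchpoint hit into a
   pending PER event, then drop the watchpoints and re-execute so that the
   PER program interruption gets delivered. */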
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}
#endif /* CONFIG_USER_ONLY */