/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
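
/* Expiration callbacks for the TOD clock comparator and CPU timer
   QEMU timers: flag the corresponding external interrupt as pending
   and kick the CPU so it re-evaluates its interrupt state.  */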
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
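
/* Instantiate the CPU object.  In this version of the code no
   model-specific setup happens here; the cpu_model and errp
   arguments are unused.  */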
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}
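
/* Create a new CPU with the given id: set its "id" property and
   realize it.  On any failure the error is propagated to errp, the
   object reference is dropped and NULL is returned.  */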
S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}
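
/* Convenience wrapper used for linux-user: CPU ids are handed out
   sequentially, and errors are only reported, not propagated.  */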
S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only.  */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
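
/* In user-only mode there is no MMU to consult: every fault is
   reported as an addressing exception and picked up by cpu_loop().  */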
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
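
/* Translate a virtual address and install the mapping in the QEMU
   TLB.  Returns 0 on success; returns 1 after a translation or
   addressing exception has been queued for delivery.  */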
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
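
/* Debug (gdbstub) translation: walk the tables with the current
   ASC without raising exceptions; -1 signals an unmapped page.  */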
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
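
/* Install a new PSW.  Under TCG the condition code is tracked
   separately in env->cc_op and taken from bits 44-45 of the mask; a
   change of the PER bit forces the watchpoints to be recomputed, and
   a wait PSW halts the CPU.  If s390_cpu_halt() returns 0, i.e. no
   runnable CPU remains, a system shutdown is requested.  */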
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
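
/* Return the PSW mask with the condition-code field made current:
   under TCG the cc is computed lazily, so fold env->cc_op back into
   bits 44-45 before the mask is stored anywhere.  */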
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
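
/* Map the lowcore (prefix area) at env->psa for direct access.
   Delivery of every interruption class goes through this mapping;
   failure to map the full LowCore is fatal.  */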
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
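
/* Deliver a program interruption: determine the instruction length
   code (reading the opcode at the PSW when it is only known "later";
   ILEN_LATER_INC additionally advances the PSW), merge a pending PER
   event into the code if present, store the old PSW and ilc in the
   lowcore and load the program-new PSW.  */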
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
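
/* Deliver a SUPERVISOR CALL interruption; the old PSW points past
   the SVC instruction.  A pending PER event is delivered right
   afterwards as a program interruption.  */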
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00
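
/* Deliver one external interruption from the per-CPU queue: store
   code/parameters and the old PSW in the lowcore, pop the queue and
   clear INTERRUPT_EXT once it drains, then load the external-new
   PSW.  */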
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
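
/* Deliver an I/O interruption.  Scan the per-ISC queues in ascending
   ISC order, skip subclasses masked off in CR6, and deliver the first
   enabled entry found; INTERRUPT_IO stays pending as long as any
   queue still holds an entry (delivered or masked).  */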
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
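
/* Deliver a machine-check interruption.  Only channel-report-pending
   machine checks (type 1) are generated, and only if CRW machine
   checks are enabled in CR14 (bit 28); the architected save areas and
   a fixed interruption code are stored in the lowcore.  */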
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
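
/* Top-level interrupt dispatcher.  Convert pending-interrupt flags
   into an exception_index, honouring the priority machine check >
   external > I/O, inject the TOD (0x1004) and CPU-timer (0x1005)
   external-interruption codes on demand, then deliver and drop
   CPU_INTERRUPT_HARD once nothing is left pending.  */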
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
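
/* TCG hook: deliver a hard interrupt if one can be taken.  Note
   that only PSW_MASK_EXT is checked here before calling the full
   dispatcher.  */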
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
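
/* Rebuild the PER storage-alteration watchpoints from the CR10/CR11
   address range.  The range may wrap around the top of the address
   space, in which case two watchpoints are needed; a full-range
   watchpoint is likewise split because of length limitations.  */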
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled.  */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if the storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range,
           so split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
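
/* Debug-exception handler: when one of the PER watchpoints fires,
   record the storage-alteration event in per_address/per_perc_atmid,
   drop the watchpoints and restart execution of the current
   instruction so the PER program interruption can be raised.  */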
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}

#endif /* CONFIG_USER_ONLY */