/*
 * This code was written as part of the CMU Common Lisp project at
 * Carnegie Mellon University, and has been placed in the public domain.
 */
#include <signal.h>

#include "sbcl.h"
#include "runtime.h"
#include "arch.h"
#include "os.h"
#include "lispregs.h"
#include "interrupt.h"
#include "breakpoint.h"

#include "genesis/constants.h"

/* Every MIPS instruction is one 32-bit word. */
#define INSN_LEN sizeof(unsigned int)
os_vm_address_t
arch_get_bad_addr(int signam, siginfo_t *siginfo, os_context_t *context)
{
    /* Classic CMUCL comment:

       Finding the bad address on the mips is easy. */
    return (os_vm_address_t)siginfo->si_addr;
}
static inline unsigned int
os_context_register(os_context_t *context, int offset)
{
    return (unsigned int)(*os_context_register_addr(context, offset));
}
static inline unsigned int
os_context_pc(os_context_t *context)
{
    return (unsigned int)(*os_context_pc_addr(context));
}
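/* Fetch the instruction that caused the trap. If the trap happened in a
   branch delay slot, the BD bit in the CP0 Cause register is set (tested
   via os_context_bd_cause) and the context's PC points at the branch, so
   the faulting instruction is the following word. */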
static inline unsigned int
os_context_insn(os_context_t *context)
{
    if (os_context_bd_cause(context))
        return *(unsigned int *)(os_context_pc(context) + INSN_LEN);
    else
        return *(unsigned int *)(os_context_pc(context));
}
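/* Decide whether an instruction is followed by a branch delay slot. The
   tests below pick apart the standard MIPS fields: the primary opcode in
   bits 26-31, the SPECIAL function field in bits 0-5, the REGIMM rt field
   in bits 16-20, and the coprocessor rs field in bits 21-25. */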
boolean
arch_insn_with_bdelay_p(unsigned int insn)
{
    switch (insn >> 26) {
    case 0x0:
        /* register jumps (jr, jalr) */
        switch (insn & 0x3f) {
        case 0x08: case 0x09:
            return 1;
        }
        break;
    case 0x1:
        /* branches and immediate jumps */
        switch ((insn >> 16) & 0x1f) {
        case 0x00: case 0x01: case 0x02: case 0x03:
        case 0x10: case 0x11: case 0x12: case 0x13:
            return 1;
        }
        break;
    case 0x2: case 0x3:                     /* j, jal */
    case 0x4: case 0x5: case 0x6: case 0x7: /* beq, bne, blez, bgtz */
        return 1;
    case 0x10: case 0x11: case 0x12:
        switch ((insn >> 21) & 0x1f) {
        /* CP0/CP1/CP2 branches */
        case 0x08:
            return 1;
        }
        break;
    /* branch likely (MIPS II) */
    case 0x14: case 0x15: case 0x16: case 0x17:
        return 1;
    }
    return 0;
}
/* Find the next instruction in the control flow. For an instruction
   with a branch delay slot, this is the branch/jump target if the branch
   is taken, and PC + 8 if it is not taken. For other instructions it
   is PC + 4. */
static unsigned int
next_insn_addr(os_context_t *context, unsigned int inst)
{
    unsigned int opcode = inst >> 26;
    unsigned int r1 = (inst >> 21) & 0x1f;
    unsigned int r2 = (inst >> 16) & 0x1f;
    unsigned int r3 = (inst >> 11) & 0x1f;
    unsigned int disp = ((inst & (1 << 15)) ? inst | (-1 << 16) : inst & 0x7fff) << 2;
    unsigned int jtgt = (os_context_pc(context) & ~0x0fffffff) | (inst & 0x3ffffff) << 2;
    unsigned int tgt = os_context_pc(context);

    switch (opcode) {
    case 0x0: /* jr, jalr */
        switch (inst & 0x3f) {
        case 0x08: /* jr */
            tgt = os_context_register(context, r1);
            break;
        case 0x09: /* jalr */
            tgt = os_context_register(context, r1);
            *os_context_register_addr(context, r3)
                = os_context_pc(context) + INSN_LEN;
            break;
        default:
            tgt += INSN_LEN;
            break;
        }
        break;
    case 0x1: /* bltz, bgez, bltzal, bgezal, ... */
        switch ((inst >> 16) & 0x1f) {
        case 0x00: /* bltz */
        case 0x02: /* bltzl */
            if ((int)os_context_register(context, r1) < 0)
                tgt += disp;
            else
                tgt += INSN_LEN;
            break;
        case 0x01: /* bgez */
        case 0x03: /* bgezl */
            if ((int)os_context_register(context, r1) >= 0)
                tgt += disp;
            else
                tgt += INSN_LEN;
            break;
        case 0x10: /* bltzal */
        case 0x12: /* bltzall */
            if ((int)os_context_register(context, r1) < 0) {
                tgt += disp;
                *os_context_register_addr(context, 31)
                    = os_context_pc(context) + INSN_LEN;
            } else
                tgt += INSN_LEN;
            break;
        case 0x11: /* bgezal */
        case 0x13: /* bgezall */
            if ((int)os_context_register(context, r1) >= 0) {
                tgt += disp;
                *os_context_register_addr(context, 31)
                    = os_context_pc(context) + INSN_LEN;
            } else
                tgt += INSN_LEN;
            break;
        default:
            tgt += INSN_LEN;
            break;
        }
        break;
    case 0x2: /* j */
        tgt = jtgt;
        break;
    case 0x3: /* jal */
        tgt = jtgt;
        *os_context_register_addr(context, 31)
            = os_context_pc(context) + INSN_LEN;
        break;
    case 0x4:  /* beq */
    case 0x14: /* beql */
        if (os_context_register(context, r1)
            == os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x5:  /* bne */
    case 0x15: /* bnel */
        if (os_context_register(context, r1)
            != os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x6:  /* blez */
    case 0x16: /* blezl */
        if ((int)os_context_register(context, r1)
            <= (int)os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x7:  /* bgtz */
    case 0x17: /* bgtzl */
        if ((int)os_context_register(context, r1)
            > (int)os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x10: case 0x11: case 0x12:
        /* CP0/CP1/CP2 branches */
        /* fall through */
    default:
        tgt += INSN_LEN;
        break;
    }

    return tgt;
}
void
arch_skip_instruction(os_context_t *context)
{
    /* Skip the offending instruction. Don't use os_context_insn here,
       since in case of a branch we want the branch insn, not the delay
       slot. */
    *os_context_pc_addr(context)
        = (os_context_register_t)
            next_insn_addr(context,
                           *(unsigned int *)(os_context_pc(context)));
}
unsigned char *
arch_internal_error_arguments(os_context_t *context)
{
    if (os_context_bd_cause(context))
        return (unsigned char *)(os_context_pc(context) + (INSN_LEN * 2));
    else
        return (unsigned char *)(os_context_pc(context) + INSN_LEN);
}
boolean
arch_pseudo_atomic_atomic(os_context_t *context)
{
    return os_context_register(context, reg_ALLOC) & 1;
}
void
arch_set_pseudo_atomic_interrupted(os_context_t *context)
{
    *os_context_register_addr(context, reg_NL4) |= -1LL << 31;
}
void
arch_clear_pseudo_atomic_interrupted(os_context_t *context)
{
    *os_context_register_addr(context, reg_NL4) &= ~(-1LL << 31);
}
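/* Breakpoints are planted by overwriting the chosen word with a MIPS
   `break' instruction: primary opcode 0, function field 0xd, with the
   Lisp trap code held in the instruction's code field starting at bit 6.
   That is what the (trap_... << 6) | 0xd expressions below construct. */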
unsigned int
arch_install_breakpoint(void *pc)
{
    unsigned int *ptr = (unsigned int *)pc;
    unsigned int orig_inst;

    /* Don't install over a branch/jump with delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    orig_inst = *ptr;
    *ptr = (trap_Breakpoint << 6) | 0xd;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);

    return orig_inst;
}
static inline unsigned int
arch_install_after_breakpoint(void *pc)
{
    unsigned int *ptr = (unsigned int *)pc;
    unsigned int orig_inst;

    /* Don't install over a branch/jump with delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    orig_inst = *ptr;
    *ptr = (trap_AfterBreakpoint << 6) | 0xd;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);

    return orig_inst;
}
void
arch_remove_breakpoint(void *pc, unsigned int orig_inst)
{
    unsigned int *ptr = (unsigned int *)pc;

    /* We may remove from a branch delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    *ptr = orig_inst;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);
}
/* Perform the instruction that we overwrote with a breakpoint. As we
   don't have a single-step facility, this means we have to:
   - put the instruction back
   - put a second breakpoint at the following instruction,
     set after_breakpoint and continue execution.

   When the second breakpoint is hit (very shortly thereafter, we hope)
   sigtrap_handler gets called again, but follows the AfterBreakpoint
   arm, which
   - puts a bpt back in the first breakpoint place (running across a
     breakpoint shouldn't cause it to be uninstalled)
   - replaces the second bpt with the instruction it was meant to be
     executing, and
   - restores the original signal mask. */

static unsigned int *skipped_break_addr, displaced_after_inst;
static sigset_t orig_sigmask;
void
arch_do_displaced_inst(os_context_t *context, unsigned int orig_inst)
{
    unsigned int *pc = (unsigned int *)os_context_pc(context);
    unsigned int *next_pc;

    orig_sigmask = *os_context_sigmask_addr(context);
    sigaddset_blockable(os_context_sigmask_addr(context));

    /* Put the original instruction back. */
    arch_remove_breakpoint(pc, orig_inst);
    skipped_break_addr = pc;

    /* Figure out where it goes. */
    next_pc = (unsigned int *)next_insn_addr(context, *pc);
    displaced_after_inst = arch_install_after_breakpoint(next_pc);
}
void
arch_handle_breakpoint(os_context_t *context)
{
    handle_breakpoint(context);
}
void
arch_handle_fun_end_breakpoint(os_context_t *context)
{
    *os_context_pc_addr(context)
        = (os_context_register_t)(unsigned int)
            handle_fun_end_breakpoint(context);
}
void
arch_handle_after_breakpoint(os_context_t *context)
{
    arch_install_breakpoint(skipped_break_addr);
    arch_remove_breakpoint((unsigned int *)os_context_pc(context),
                           displaced_after_inst);
    *os_context_sigmask_addr(context) = orig_sigmask;
}
void
arch_handle_single_step_trap(os_context_t *context, int trap)
{
    /* The register to report is encoded in bits 11-15 of the trapping
       instruction. */
    unsigned int code = *((u32 *)(os_context_pc(context)));
    int register_offset = (code >> 11) & 0x1f;

    handle_single_step_trap(context, trap, register_offset);
    arch_skip_instruction(context);
}
static void
sigtrap_handler(int signal, siginfo_t *info, os_context_t *context)
{
    unsigned int code = (os_context_insn(context) >> 6) & 0x1f;

    if (code == trap_PendingInterrupt) {
        /* KLUDGE: is this necessary or will handle_trap do the same? */
        arch_clear_pseudo_atomic_interrupted(context);
    }

    handle_trap(context, code);
}
static void
sigfpe_handler(int signal, siginfo_t *info, os_context_t *context)
{
    interrupt_handle_now(signal, info, context);
}
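/* Floating point control. Coprocessor 1 control register 31 is the
   floating-point control/status register (FCSR); cfc1 and ctc1 copy it
   to and from a general-purpose register. */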
unsigned int
arch_get_fp_control(void)
{
    register unsigned int ret asm("$2");

    __asm__ __volatile__ ("cfc1 %0, $31" : "=r" (ret));

    return ret;
}
void
arch_set_fp_control(unsigned int fp)
{
    __asm__ __volatile__ ("ctc1 %0, $31" :: "r" (fp));
}
void
arch_install_interrupt_handlers(void)
{
    undoably_install_low_level_interrupt_handler(SIGTRAP, sigtrap_handler);
    undoably_install_low_level_interrupt_handler(SIGFPE, sigfpe_handler);
}
#ifdef LISP_FEATURE_LINKAGE_TABLE

/* Linkage tables for MIPS

   Linkage entry size is 16, because we need 4 instructions to implement
   a jump. The entry size constant is defined in parms.lisp.

   Define the register to use in the linkage jump table. For MIPS this
   has to be the PIC call register $25 aka t9 aka reg_ALLOC. */

#define LINKAGE_TEMP_REG        reg_ALLOC
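/* For illustration only: assuming LINKAGE_TEMP_REG is $25 as noted above
   and a hypothetical target address of 0x10004567, the entry written by
   arch_write_linkage_table_jmp below would contain the four words

       0x3c191000    lui   $25, 0x1000
       0x27394567    addiu $25, $25, 0x4567
       0x03200008    jr    $25
       0x00000000    nop   (branch delay slot)

   The +0x8000 in the %hi computation compensates for addiu sign-extending
   its 16-bit immediate. */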
/* Insert the necessary jump instructions at the given address. */
void
arch_write_linkage_table_jmp(char *reloc_addr, void *target_addr)
{
    /* Make JMP to function entry. The instruction sequence is:
           lui   $25, %hi(addr)
           addiu $25, $25, %lo(addr)
           jr    $25
           nop                          (branch delay slot)
    */
    unsigned int *insn = (unsigned int *)reloc_addr;
    unsigned int addr = (unsigned int)target_addr;
    unsigned int hi = ((addr + 0x8000) >> 16) & 0xffff;
    unsigned int lo = addr & 0xffff;

    *insn++ = (15 << 26) | (LINKAGE_TEMP_REG << 16) | hi;  /* lui */
    *insn++ = ((9 << 26) | (LINKAGE_TEMP_REG << 21)
               | (LINKAGE_TEMP_REG << 16) | lo);            /* addiu */
    *insn++ = (LINKAGE_TEMP_REG << 21) | 8;                 /* jr */
    *insn++ = 0;                                            /* nop (delay slot) */

    os_flush_icache((os_vm_address_t)reloc_addr, LINKAGE_TABLE_ENTRY_SIZE);
}
void
arch_write_linkage_table_ref(void *reloc_addr, void *target_addr)
{
    *(unsigned int *)reloc_addr = (unsigned int)target_addr;
}

#endif /* LISP_FEATURE_LINKAGE_TABLE */