/*
 This code was written as part of the CMU Common Lisp project at
 Carnegie Mellon University, and has been placed in the public domain.
 */

#include <stdio.h>

#include "sbcl.h"
#include "runtime.h"
#include "arch.h"
#include "globals.h"
#include "validate.h"
#include "os.h"
#include "lispregs.h"
#include "signal.h"
#include "alloc.h"
#include "interrupt.h"
#include "interr.h"
#include "breakpoint.h"

#include "genesis/constants.h"

#define INSN_LEN sizeof(unsigned int)

void
arch_init(void)
{
    return;
}

os_vm_address_t
arch_get_bad_addr(int signam, siginfo_t *siginfo, os_context_t *context)
{
    /* Classic CMUCL comment:

       Finding the bad address on the mips is easy. */
    return (os_vm_address_t)siginfo->si_addr;
}

static inline unsigned int
os_context_register(os_context_t *context, int offset)
{
    return (unsigned int)(*os_context_register_addr(context, offset));
}

static inline unsigned int
os_context_pc(os_context_t *context)
{
    return (unsigned int)(*os_context_pc_addr(context));
}

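/* If the trap happened in a branch delay slot, the CP0 Cause BD bit is
   set and the saved PC points at the branch, so the instruction that
   actually trapped is the one at PC + 4. */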
static inline unsigned int
os_context_insn(os_context_t *context)
{
    if (os_context_bd_cause(context))
        return *(unsigned int *)(os_context_pc(context) + INSN_LEN);
    else
        return *(unsigned int *)(os_context_pc(context));
}

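/* Return true if INSN has a branch delay slot, i.e. is a branch or jump.
   Primary opcode 0x00 is SPECIAL (jr/jalr), 0x01 is REGIMM (bltz/bgez and
   friends), 0x02-0x07 are j/jal/beq/bne/blez/bgtz, 0x10-0x12 are the
   COP0/COP1/COP2 groups whose rs field value 0x08 selects a coprocessor
   branch, and 0x14-0x17 are the MIPS II branch-likely forms. */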
boolean
arch_insn_with_bdelay_p(unsigned int insn)
{
    switch (insn >> 26) {
    case 0x0:
        switch (insn & 0x3f) {
        /* register jumps */
        case 0x08:
        case 0x09:
            return 1;
        }
        break;
    /* branches and immediate jumps */
    case 0x1:
        switch ((insn >> 16) & 0x1f) {
        case 0x00:
        case 0x01:
        case 0x02:
        case 0x03:
        case 0x10:
        case 0x11:
        case 0x12:
        case 0x13:
            return 1;
        }
        break;
    case 0x2:
    case 0x3:
    case 0x4:
    case 0x5:
    case 0x6:
    case 0x7:
        return 1;
    case 0x10:
    case 0x11:
    case 0x12:
        switch ((insn >> 21) & 0x1f) {
        /* CP0/CP1/CP2 branches */
        case 0x08:
            return 1;
        }
        break;
    /* branch likely (MIPS II) */
    case 0x14:
    case 0x15:
    case 0x16:
    case 0x17:
        return 1;
    }
    return 0;
}

/* Find the next instruction in the control flow. For an instruction
   with a branch delay slot, this is the branch/jump target if the branch
   is taken, and PC + 8 if it is not taken. For other instructions it
   is PC + 4. */
static unsigned int
next_insn_addr(os_context_t *context, unsigned int inst)
{
    unsigned int opcode = inst >> 26;
    unsigned int r1 = (inst >> 21) & 0x1f;
    unsigned int r2 = (inst >> 16) & 0x1f;
    unsigned int r3 = (inst >> 11) & 0x1f;
    unsigned int disp = ((inst&(1<<15)) ? inst | (-1 << 16) : inst&0x7fff) << 2;
    unsigned int jtgt = (os_context_pc(context) & ~0x0fffffff) | (inst&0x3ffffff) << 2;
    unsigned int tgt = os_context_pc(context);

    switch(opcode) {
    case 0x0: /* jr, jalr */
        switch(inst & 0x3f) {
        case 0x08: /* jr */
            tgt = os_context_register(context, r1);
            break;
        case 0x09: /* jalr */
            tgt = os_context_register(context, r1);
            *os_context_register_addr(context, r3)
                = os_context_pc(context) + INSN_LEN;
            break;
        default:
            tgt += INSN_LEN;
            break;
        }
        break;
    case 0x1: /* bltz, bgez, bltzal, bgezal, ... */
        switch(r2) {
        case 0x00: /* bltz */
        case 0x02: /* bltzl */
            /* bltz/bgez are signed comparisons, hence the casts */
            if((int)os_context_register(context, r1) < 0)
                tgt += disp;
            else
                tgt += INSN_LEN;
            break;
        case 0x01: /* bgez */
        case 0x03: /* bgezl */
            if((int)os_context_register(context, r1) >= 0)
                tgt += disp;
            else
                tgt += INSN_LEN;
            break;
        case 0x10: /* bltzal */
        case 0x12: /* bltzall */
            if((int)os_context_register(context, r1) < 0) {
                tgt += disp;
                *os_context_register_addr(context, 31)
                    = os_context_pc(context) + INSN_LEN;
            } else
                tgt += INSN_LEN;
            break;
        case 0x11: /* bgezal */
        case 0x13: /* bgezall */
            if((int)os_context_register(context, r1) >= 0) {
                tgt += disp;
                *os_context_register_addr(context, 31)
                    = os_context_pc(context) + INSN_LEN;
            } else
                tgt += INSN_LEN;
            break;
        default:
            tgt += INSN_LEN;
            break;
        }
        break;
    case 0x2: /* j */
        tgt = jtgt;
        break;
    case 0x3: /* jal */
        tgt = jtgt;
        *os_context_register_addr(context, 31)
            = os_context_pc(context) + INSN_LEN;
        break;
    case 0x4: /* beq */
    case 0x14: /* beql */
        if(os_context_register(context, r1)
           == os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x5: /* bne */
    case 0x15: /* bnel */
        if(os_context_register(context, r1)
           != os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x6: /* blez */
    case 0x16: /* blezl */
        /* signed comparison against $zero (r2 is always 0 here) */
        if((int)os_context_register(context, r1)
           <= (int)os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x7: /* bgtz */
    case 0x17: /* bgtzl */
        if((int)os_context_register(context, r1)
           > (int)os_context_register(context, r2))
            tgt += disp;
        else
            tgt += INSN_LEN;
        break;
    case 0x10:
    case 0x11:
    case 0x12:
        switch (r1) {
        /* CP0/CP1/CP2 branches */
        case 0x08:
            /* FIXME */
            tgt += INSN_LEN;
            break;
        }
        break;
    default:
        tgt += INSN_LEN;
        break;
    }
    return tgt;
}

void
arch_skip_instruction(os_context_t *context)
{
    /* Skip the offending instruction. Don't use os_context_insn here,
       since in case of a branch we want the branch insn, not the delay
       slot. */
    *os_context_pc_addr(context)
        = (os_context_register_t)
        next_insn_addr(context,
                       *(unsigned int *)(os_context_pc(context)));
}

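/* The internal-error arguments are encoded in the bytes that follow the
   trapping instruction, so skip one instruction word, or two when the
   trap sat in a branch delay slot (PC then points at the branch). */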
unsigned char *
arch_internal_error_arguments(os_context_t *context)
{
    if (os_context_bd_cause(context))
        return (unsigned char *)(os_context_pc(context) + (INSN_LEN * 2));
    else
        return (unsigned char *)(os_context_pc(context) + INSN_LEN);
}

boolean
arch_pseudo_atomic_atomic(os_context_t *context)
{
    return os_context_register(context, reg_ALLOC) & 1;
}

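/* The pseudo-atomic bit itself is the low bit of reg_ALLOC, tested above;
   the pseudo-atomic-interrupted flag lives in the sign bit of reg_NL4,
   set and cleared below. */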
void
arch_set_pseudo_atomic_interrupted(os_context_t *context)
{
    *os_context_register_addr(context, reg_NL4) |= -1LL<<31;
}

void
arch_clear_pseudo_atomic_interrupted(os_context_t *context)
{
    *os_context_register_addr(context, reg_NL4) &= ~(-1LL<<31);
}

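/* Breakpoints use the MIPS `break' instruction: primary opcode 0,
   function code 0xd, with bits 25:6 available as a code field that the
   trap handler can read back.  (trap_Breakpoint << 6) | 0xd therefore
   assembles a break carrying the SBCL trap code. */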
unsigned int
arch_install_breakpoint(void *pc)
{
    unsigned int *ptr = (unsigned int *)pc;
    unsigned int insn;

    /* Don't install over a branch/jump with delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    insn = *ptr;
    *ptr = (trap_Breakpoint << 6) | 0xd;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);

    return insn;
}

static inline unsigned int
arch_install_after_breakpoint(void *pc)
{
    unsigned int *ptr = (unsigned int *)pc;
    unsigned int insn;

    /* Don't install over a branch/jump with delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    insn = *ptr;
    *ptr = (trap_AfterBreakpoint << 6) | 0xd;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);

    return insn;
}

void
arch_remove_breakpoint(void *pc, unsigned int orig_inst)
{
    unsigned int *ptr = (unsigned int *)pc;

    /* We may remove from a branch delay slot. */
    if (arch_insn_with_bdelay_p(*ptr))
        ptr++;

    *ptr = orig_inst;
    os_flush_icache((os_vm_address_t)ptr, INSN_LEN);
}

/* Perform the instruction that we overwrote with a breakpoint. As we
   don't have a single-step facility, this means we have to:
   - put the instruction back
   - put a second breakpoint at the following instruction,
     set after_breakpoint and continue execution.

   When the second breakpoint is hit (very shortly thereafter, we hope)
   sigtrap_handler gets called again, but follows the AfterBreakpoint
   arm, which
   - puts a bpt back in the first breakpoint place (running across a
     breakpoint shouldn't cause it to be uninstalled)
   - replaces the second bpt with the instruction it was meant to be
   - carries on

   Clear? */

static unsigned int *skipped_break_addr, displaced_after_inst;
static sigset_t orig_sigmask;

void
arch_do_displaced_inst(os_context_t *context, unsigned int orig_inst)
{
    unsigned int *pc = (unsigned int *)os_context_pc(context);
    unsigned int *next_pc;

    orig_sigmask = *os_context_sigmask_addr(context);
    sigaddset_blockable(os_context_sigmask_addr(context));

    /* Put the original instruction back. */
    arch_remove_breakpoint(pc, orig_inst);
    skipped_break_addr = pc;

    /* Figure out where it goes. */
    next_pc = (unsigned int *)next_insn_addr(context, *pc);
    displaced_after_inst = arch_install_after_breakpoint(next_pc);
}

void
arch_handle_breakpoint(os_context_t *context)
{
    handle_breakpoint(context);
}

void
arch_handle_fun_end_breakpoint(os_context_t *context)
{
    *os_context_pc_addr(context)
        = (os_context_register_t)(unsigned int)
        handle_fun_end_breakpoint(context);
}

void
arch_handle_after_breakpoint(os_context_t *context)
{
    arch_install_breakpoint(skipped_break_addr);
    arch_remove_breakpoint((unsigned int *)os_context_pc(context),
                           displaced_after_inst);
    *os_context_sigmask_addr(context) = orig_sigmask;
}

void
arch_handle_single_step_trap(os_context_t *context, int trap)
{
    unsigned int code = *((u32 *)(os_context_pc(context)));
    int register_offset = code >> 11 & 0x1f;
    handle_single_step_trap(context, trap, register_offset);
    arch_skip_instruction(context);
}

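/* The trap code is read back out of the break instruction's code field;
   masking with 0x1f assumes the codes this handler cares about fit in
   five bits. */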
static void
sigtrap_handler(int signal, siginfo_t *info, os_context_t *context)
{
    unsigned int code = (os_context_insn(context) >> 6) & 0x1f;
    if (code == trap_PendingInterrupt) {
        /* KLUDGE: is this necessary or will handle_trap do the same? */
        arch_clear_pseudo_atomic_interrupted(context);
    }
    handle_trap(context, code);
}

static void
sigfpe_handler(int signal, siginfo_t *info, os_context_t *context)
{
    interrupt_handle_now(signal, info, context);
}

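/* cfc1/ctc1 move FP control register $31 (FCSR, the floating-point
   control/status register) out of and into the FPU. */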
unsigned int
arch_get_fp_control(void)
{
    register unsigned int ret asm("$2");

    __asm__ __volatile__ ("cfc1 %0, $31" : "=r" (ret));

    return ret;
}

void
arch_set_fp_control(unsigned int fp)
{
    __asm__ __volatile__ ("ctc1 %0, $31" :: "r" (fp));
}

void
arch_install_interrupt_handlers(void)
{
    undoably_install_low_level_interrupt_handler(SIGTRAP,sigtrap_handler);
    undoably_install_low_level_interrupt_handler(SIGFPE,sigfpe_handler);
}

#ifdef LISP_FEATURE_LINKAGE_TABLE

/* Linkage tables for MIPS

   Linkage entry size is 16, because we need 4 instructions to implement
   a jump. The entry size constant is defined in parms.lisp.

   Define the register to use in the linkage jump table. For MIPS this
   has to be the PIC call register $25 aka t9 aka reg_ALLOC. */
#define LINKAGE_TEMP_REG reg_ALLOC

/* Insert the necessary jump instructions at the given address. */
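/* Word encodings used below: lui is primary opcode 0x0f and addiu 0x09;
   jr is SPECIAL (opcode 0) with function code 8; an all-zero word is a
   nop.  The high half is rounded with +0x8000 because addiu sign-extends
   its 16-bit immediate. */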
void
arch_write_linkage_table_jmp(char *reloc_addr, void *target_addr)
{
    /* Make JMP to function entry. The instruction sequence is:
           lui    $25, %hi(addr)
           addiu  $25, $25, %lo(addr)
           jr     $25
           nop */
    unsigned int *insn = (unsigned int *)reloc_addr;
    unsigned int addr = (unsigned int)target_addr;
    unsigned int hi = ((addr + 0x8000) >> 16) & 0xffff;
    unsigned int lo = addr & 0xffff;

    *insn++ = (15 << 26) | (LINKAGE_TEMP_REG << 16) | hi;
    *insn++ = ((9 << 26) | (LINKAGE_TEMP_REG << 21)
               | (LINKAGE_TEMP_REG << 16) | lo);
    *insn++ = (LINKAGE_TEMP_REG << 21) | 8;
    *insn = 0;

    os_flush_icache((os_vm_address_t)reloc_addr, LINKAGE_TABLE_ENTRY_SIZE);
}

void
arch_write_linkage_table_ref(void *reloc_addr, void *target_addr)
{
    *(unsigned int *)reloc_addr = (unsigned int)target_addr;
}

#endif