2 * This software is part of the SBCL system. See the README file for
5 * This software is derived from the CMU CL system, which was
6 * written at Carnegie Mellon University and released into the
7 * public domain. The software is in the public domain and is
8 * provided with absolutely no warranty. See the COPYING and CREDITS
9 * files for more information.
14 #include "genesis/sbcl.h"
19 #include "interrupt.h"
22 #include "interrupt.h"
24 #include "breakpoint.h"
25 #include "pseudo-atomic.h"
26 #include "gc-assert.h"
28 /* The header files may not define PT_DAR/PT_DSISR. This definition
29 is correct for all versions of ppc linux >= 2.0.30
31 As of DR2.1u4, MkLinux doesn't pass these registers to signal
32 handlers correctly; a patch is necessary in order to (partially)
35 Even with the patch, the DSISR may not have its 'write' bit set
36 correctly (it tends not to be set if the fault was caused by
37 something other than a protection violation.)
41 #if defined (LISP_FEATURE_DARWIN) || defined(LISP_FEATURE_LINUX)
51 #ifdef LISP_FEATURE_64_BIT
52 #define TRAP_INSTRUCTION(trap) ((2<<26) | (1 << 21) | reg_NULL << 16 | (trap))
54 #define TRAP_INSTRUCTION(trap) ((3<<26) | (6 << 21) | (trap))
59 arch_get_bad_addr(int sig
, siginfo_t
*code
, os_context_t
*context
)
63 #if defined(LISP_FEATURE_NETBSD) || defined(LISP_FEATURE_OPENBSD)
64 addr
= (os_vm_address_t
) (code
->si_addr
);
66 addr
= (os_vm_address_t
) (*os_context_register_addr(context
,PT_DAR
));
73 arch_skip_instruction(os_context_t
*context
)
75 OS_CONTEXT_PC(context
) += 4;
79 arch_internal_error_arguments(os_context_t
*context
)
81 return (unsigned char *)(OS_CONTEXT_PC(context
)+4);
85 bool arch_pseudo_atomic_atomic(struct thread
*thread
) {
86 return get_pseudo_atomic_atomic(thread
);
/* Record that an interrupt arrived while THREAD was pseudo-atomic, so it
 * can be serviced once the pseudo-atomic section is left. */
void arch_set_pseudo_atomic_interrupted(struct thread *thread)
{
    set_pseudo_atomic_interrupted(thread);
}
/* Clear the pseudo-atomic-interrupted flag for THREAD. */
void arch_clear_pseudo_atomic_interrupted(struct thread *thread)
{
    clear_pseudo_atomic_interrupted(thread);
}
98 arch_install_breakpoint(void *pc
)
100 unsigned int *ptr
= (unsigned int *)pc
;
101 unsigned int result
= *ptr
;
102 *ptr
= TRAP_INSTRUCTION(trap_Breakpoint
);
103 os_flush_icache((os_vm_address_t
) pc
, sizeof(unsigned int));
108 arch_remove_breakpoint(void *pc
, unsigned int orig_inst
)
110 *(unsigned int *)pc
= orig_inst
;
111 os_flush_icache((os_vm_address_t
) pc
, sizeof(unsigned int));
115 * Perform the instruction that we overwrote with a breakpoint. As we
116 * don't have a single-step facility, this means we have to:
117 * - put the instruction back
118 * - put a second breakpoint at the following instruction,
119 * set after_breakpoint and continue execution.
121 * When the second breakpoint is hit (very shortly thereafter, we hope)
122 * sigtrap_handler gets called again, but follows the AfterBreakpoint
124 * - puts a bpt back in the first breakpoint place (running across a
125 * breakpoint shouldn't cause it to be uninstalled)
126 * - replaces the second bpt with the instruction it was meant to be
131 static unsigned int *skipped_break_addr
, displaced_after_inst
;
132 static sigset_t orig_sigmask
;
135 should_branch(os_context_t
*context
, unsigned int orig_inst
)
137 /* orig_inst is a conditional branch instruction. We need to
138 * know if the branch will be taken if executed in context. */
139 int ctr
= *os_context_ctr_addr(context
);
140 int cr
= *os_context_cr_addr(context
);
141 int bo_field
= (orig_inst
>> 21) & 0x1f;
142 int bi_field
= (orig_inst
>> 16) & 0x1f;
145 if (!(bo_field
& 4)) ctr
--; /* Decrement CTR if necessary. */
147 ctr_ok
= (bo_field
& 4) || ((ctr
== 0) == ((bo_field
& 2) == 2));
148 return ctr_ok
&& ((bo_field
& 0x10) ||
149 !(((cr
>> (31-bi_field
)) ^ (bo_field
>> 3)) & 1));
152 static sword_t
sign_extend(uword_t word
, int n_bits
) {
153 return (sword_t
)(word
<<(N_WORD_BITS
-n_bits
)) >> (N_WORD_BITS
-n_bits
);
157 arch_do_displaced_inst(os_context_t
*context
, unsigned int orig_inst
)
159 /* not sure how we ensure that we get the breakpoint reinstalled
160 * after doing this -dan */
161 unsigned int *pc
= (unsigned int *)OS_CONTEXT_PC(context
);
162 unsigned int *next_pc
;
163 int op
= orig_inst
>> 26;
164 int sub_op
= (orig_inst
& 0x7fe) >> 1; /* XL-form sub-opcode */
166 orig_sigmask
= *os_context_sigmask_addr(context
);
167 sigaddset_blockable(os_context_sigmask_addr(context
));
170 os_flush_icache((os_vm_address_t
) pc
, sizeof(unsigned int));
171 skipped_break_addr
= pc
;
173 /* Figure out where we will end up after running the displaced
174 * instruction by defaulting to the next instruction in the stream
175 * and then checking for branch instructions. FIXME: This will
176 * probably screw up if it attempts to step a trap instruction. */
181 sword_t displacement
= sign_extend(orig_inst
& 0x03fffffc, 26);
182 if (orig_inst
& 2) { /* Absolute Address */
183 next_pc
= (unsigned int *)displacement
;
185 next_pc
= (unsigned int *)((uword_t
)pc
+ displacement
);
187 } else if ((op
== 16)
188 && should_branch(context
, orig_inst
)) {
189 /* Branch Conditional B-form */
190 sword_t displacement
= sign_extend(orig_inst
& 0x0000fffc, 16);
191 if (orig_inst
& 2) { /* Absolute Address */
192 next_pc
= (unsigned int *)displacement
;
194 next_pc
= (unsigned int *)((uword_t
)pc
+ displacement
);
196 } else if ((op
== 19) && (sub_op
== 16)
197 && should_branch(context
, orig_inst
)) {
198 /* Branch Conditional to Link Register XL-form */
199 next_pc
= (unsigned int *)
200 ((*os_context_lr_addr(context
)) & ~3);
201 } else if ((op
== 19) && (sub_op
== 528)
202 && should_branch(context
, orig_inst
)) {
203 /* Branch Conditional to Count Register XL-form */
204 next_pc
= (unsigned int *)
205 ((*os_context_ctr_addr(context
)) & ~3);
208 /* Set the "after" breakpoint. */
209 displaced_after_inst
= *next_pc
;
210 *next_pc
= TRAP_INSTRUCTION(trap_AfterBreakpoint
);
211 os_flush_icache((os_vm_address_t
)next_pc
, sizeof(unsigned int));
214 #define INLINE_ALLOC_DEBUG 0
215 #define ALLOC_TRAP_LISTIFY 1
216 #define ALLOC_TRAP_CONS 2
217 #define ALLOC_TRAP_GENERAL 3
219 * Return non-zero if the current instruction is an allocation trap
222 allocation_trap_p(os_context_t
* context
)
225 * First, the instruction has to be "Tx {LGT|LGE} temp, NL3", which has the
227 * | 6| 5| 5 | 5 | 10|1| field width
228 * ----------------------
229 * |31| TO|dst|src| 4|0| TW - trap word
230 * |31| TO|dst|src| 68|0| TD - trap doubleword
232 * TO = #b00100 for EQ
233 * #b00001 for LGT (logical greater-than)
234 * #b00101 for LGE (logical greater-or-equal)
235 * #b00010 for LLT (logical less-than)
236 * #b00110 for LLE (logical less-or-equal)
238 unsigned *pc
= (unsigned int *)OS_CONTEXT_PC(context
);
240 unsigned opcode
= inst
>> 26;
241 unsigned src
= (inst
>> 11) & 0x1f;
242 unsigned dst
= (inst
>> 16) & 0x1f;
243 unsigned to
= (inst
>> 21) & 0x1f;
244 unsigned subcode
= inst
& 0x7ff;
246 // recognize the listify-rest-args allocation trap
247 if (opcode
== 31 && to
== 5 && src
== dst
&& subcode
== 4<<1)
248 return ALLOC_TRAP_LISTIFY
;
250 // FIXME: we can remove the wired use of NL3 in the allocator
251 if (opcode
== 31 && (to
== 1 || to
== 2)
252 && (src
== reg_NL3
|| dst
== reg_NL3
)
253 && (subcode
== 4<<1 || subcode
== 68<<1)) {
254 int success
= (to
== 2) ? ALLOC_TRAP_CONS
: ALLOC_TRAP_GENERAL
;
257 * We got the instruction. Now, look back to make sure it was
258 * preceded by what we expected. The previous instruction
259 * should be an ADD or ADDI instruction.
261 unsigned int add_inst
;
264 opcode
= add_inst
>> 26;
265 if ((opcode
== 31) && (266 == ((add_inst
>> 1) & 0x1ff))) {
267 } else if (opcode
== 14) {
271 "Whoa! Got allocation trap but could not find ADD or ADDI instruction: 0x%08x in the proper place\n",
279 handle_allocation_trap(os_context_t
* context
)
281 int alloc_trap_kind
= allocation_trap_p(context
);
283 if (!alloc_trap_kind
) return 0;
285 struct thread
* thread
= get_sb_vm_thread();
286 gc_assert(!foreign_function_call_active_p(thread
));
287 if (gencgc_alloc_profiler
&& thread
->state_word
.sprof_enable
)
288 record_backtrace_from_context(context
, thread
);
290 fake_foreign_function_call(context
);
291 struct interrupt_data
*data
= &thread_interrupt_data(thread
);
292 data
->allocation_trap_context
= context
;
294 unsigned int *pc
= (unsigned int *)OS_CONTEXT_PC(context
);
296 if (alloc_trap_kind
== ALLOC_TRAP_LISTIFY
) {
297 unsigned inst
= pc
[0];
298 int count_reg
= (inst
>> 11) & 0x1f;
299 // There's a dummy trap instruction which encodes the context and result.
301 unsigned context_reg
= (inst
>> 16) & 0x1f;
302 unsigned result_reg
= (inst
>> 11) & 0x1f;
303 lispobj
* argv
= (void*)*os_context_register_addr(context
, context_reg
);
304 lispobj nbytes
= *os_context_register_addr(context
, count_reg
);
305 extern lispobj
listify_rest_arg(lispobj
*, sword_t
);
306 lispobj result
= listify_rest_arg(argv
, nbytes
);
307 *os_context_register_addr(context
, result_reg
) = result
;
308 // Skip this and the next instruction
309 OS_CONTEXT_PC(context
) += 8;
312 * Go back and look at the add/addi instruction. The second src arg
313 * is the size of the allocation. Get it and call alloc to allocate
315 * (Alternatively we could look at the trap instruction to see which
316 * register was compared against the region free pointer. Subtracting
317 * the region base would yield the size)
319 unsigned int inst
= pc
[-1];
320 int target
= (inst
>> 21) & 0x1f;
321 unsigned int opcode
= inst
>> 26;
323 if (opcode
== 14) { // ADDI temp-tn, alloc-tn, size
324 size
= (inst
& 0xffff);
325 } else if (opcode
== 31) { // ADD temp-tn, alloc-tn, size-tn
327 reg
= (inst
>> 11) & 0x1f;
328 size
= *os_context_register_addr(context
, reg
);
331 extern lispobj
*alloc(sword_t
), *alloc_list(sword_t
);
332 memory
= (char*)(alloc_trap_kind
==ALLOC_TRAP_CONS
? alloc_list(size
) : alloc(size
));
333 // ALLOCATION wants the result to point to the end of the object!
334 *os_context_register_addr(context
, target
) =
335 (os_context_register_t
)(memory
+ size
);
336 // Skip 2 instructions: the trap, and the writeback of free pointer
337 OS_CONTEXT_PC(context
) = (uword_t
)(pc
+ 2); // ('pc' is of type int*)
340 data
->allocation_trap_context
= 0;
341 undo_fake_foreign_function_call(context
);
346 #if defined LISP_FEATURE_SB_THREAD
348 handle_tls_trap(os_context_t
* context
, uword_t pc
, unsigned int code
)
350 #ifdef LISP_FEATURE_PPC64
351 # ifdef LISP_FEATURE_LITTLE_ENDIAN
352 # define TLS_INDEX_FIELD_DISPLACEMENT (4-OTHER_POINTER_LOWTAG)
354 # define TLS_INDEX_FIELD_DISPLACEMENT (-OTHER_POINTER_LOWTAG)
357 # define TLS_INDEX_FIELD_DISPLACEMENT \
358 (offsetof(struct symbol,tls_index)-OTHER_POINTER_LOWTAG)
361 /* LWZ ra,-k(rb) followed by TWI :eq ra,0 is the "unassigned symbol TLS" trap.
362 * LWZ and TWI coincidentally have a similar format as follows:
363 * | 6| 5| 5| 16| field width
364 * ----------------------
365 * LWZ: |32| RT| RA| D|
366 * TWI: | 3| TO| RA|imm| TO bits: EQ = 4
372 if ((code
& ~(31 << 16)) == ((3<<26)|(4<<21))) { // mask out RA for test
373 prev_inst
= ((uint32_t*)pc
)[-1];
374 int16_t disp
= (prev_inst
& 0xFFFF);
375 handle_it
= (prev_inst
>> 26) == 32 // a load
376 // RT of the load must be RA of the trap
377 && ((prev_inst
>> 21) & 31) == ((code
>> 16) & 31)
378 && (disp
== TLS_INDEX_FIELD_DISPLACEMENT
);
380 if (!handle_it
) return 0;
382 struct thread
*thread
= get_sb_vm_thread();
383 set_pseudo_atomic_atomic(thread
);
385 int symbol_reg
= (prev_inst
>> 16) & 31;
386 struct symbol
*specvar
=
387 SYMBOL(*os_context_register_addr(context
, symbol_reg
));
388 struct symbol
*free_tls_index
= SYMBOL(FREE_TLS_INDEX
);
390 // *FREE-TLS-INDEX* value is [lock][tls-index]
391 uword_t
* pvalue
= &free_tls_index
->value
;
393 const uword_t spinlock_bit
= (uword_t
)1<<31;
395 old
= __sync_fetch_and_or(pvalue
, spinlock_bit
);
396 if (old
& spinlock_bit
) sched_yield(); else break;
398 // sync_fetch_and_or acts as a barrier which prevents
399 // speculatively loading tls_index_of().
400 uint32_t tls_index
= tls_index_of(specvar
);
401 if (tls_index
!= 0) { // someone else assigned
402 free_tls_index
->value
= old
; // just release the spinlock
403 // fprintf(stderr, "TLS index trap: special var = %p, data race\n", specvar);
406 // XXX: need to be careful here if GC uses any bits of the header
407 // for concurrent marking. Would need to do a 4-byte write in that case.
408 // This is simpler because it works for either endianness.
409 #ifdef LISP_FEATURE_PPC64
410 specvar
->header
|= (uword_t
)tls_index
<< 32;
412 specvar
->tls_index
= tls_index
;
414 // A barrier here ensures that nobody else thinks this symbol
415 // doesn't have a TLS index. compare-and-swap is the barrier.
416 // It doesn't really need to be a CAS, because we hold the spinlock.
417 int res
= __sync_bool_compare_and_swap(pvalue
,
421 // fprintf(stderr, "TLS index trap: special var = %p, assigned %x\n", specvar, tls_index);
423 // This is actually always going to be 0 for 64-bit code
424 int tlsindex_reg
= (code
>> 16) & 31; // the register we trapped on
425 *os_context_register_addr(context
, tlsindex_reg
) = tls_index
;
426 clear_pseudo_atomic_atomic(thread
);
427 return 1; // handled this signal
432 arch_handle_breakpoint(os_context_t
*context
)
434 handle_breakpoint(context
);
438 arch_handle_fun_end_breakpoint(os_context_t
*context
)
440 OS_CONTEXT_PC(context
) = (uword_t
)handle_fun_end_breakpoint(context
);
444 arch_handle_after_breakpoint(os_context_t
*context
)
446 *skipped_break_addr
= TRAP_INSTRUCTION(trap_Breakpoint
);
447 os_flush_icache((os_vm_address_t
) skipped_break_addr
,
448 sizeof(unsigned int));
449 skipped_break_addr
= NULL
;
450 // This writes an instruction, NOT assigns to the pc.
451 *(unsigned int *)OS_CONTEXT_PC(context
) = displaced_after_inst
;
452 *os_context_sigmask_addr(context
)= orig_sigmask
;
453 os_flush_icache((os_vm_address_t
) OS_CONTEXT_PC(context
),
454 sizeof(unsigned int));
458 arch_handle_single_step_trap(os_context_t
*context
, int trap
)
460 unsigned int code
= *(uint32_t *)OS_CONTEXT_PC(context
);
461 int register_offset
= code
>> 8 & 0x1f;
462 handle_single_step_trap(context
, trap
, register_offset
);
463 arch_skip_instruction(context
);
467 static void dump_cpu_state(char *reason
, os_context_t
* context
)
472 sigset_t
*sigset
= os_context_sigmask_addr(context
); sigset_tostring(sigset
, buf
, sizeof buf
);
473 fprintf(stderr
, "%s\n", reason
);
474 fprintf(stderr
, " oldmask=%s\n", buf
);
475 pthread_sigmask(0, 0, &cur_sigset
); sigset_tostring(&cur_sigset
, buf
, sizeof buf
);
476 fprintf(stderr
, " curmask=%s\n", buf
);
477 fprintf(stderr
, " $pc=%16lx $lr=%16lx $ctr=%16lx $cr=%16lx\n",
478 OS_CONTEXT_PC(context
),
479 *os_context_lr_addr(context
),
480 *os_context_ctr_addr(context
),
481 *os_context_cr_addr(context
));
483 for(j
=0; j
<8 && r
<32; ++j
,++r
)
484 fprintf(stderr
, " %s%d=%16lx",
485 r
<10 ? " $r" : "$r", r
,
486 *os_context_register_addr(context
, r
));
493 sigtrap_handler(int signal
, siginfo_t
*siginfo
, os_context_t
*context
)
495 uword_t pc
= OS_CONTEXT_PC(context
);
496 unsigned int code
= *(uint32_t*)pc
;
498 #ifdef LISP_FEATURE_SIGILL_TRAPS
499 if (signal
== SIGILL
) {
500 if (code
== 0x7C0002A6) { // allocation region overflow trap
501 // there is an actual trap instruction located 2 instructions later.
502 // pretend the trap happened there.
503 OS_CONTEXT_PC(context
) = pc
+ 8;
504 if (handle_allocation_trap(context
)) return;
506 if (code
== 0x7C2002A6) { // pending interrupt
507 arch_clear_pseudo_atomic_interrupted(get_sb_vm_thread());
508 arch_skip_instruction(context
);
509 interrupt_handle_pending(context
);
512 lose("sigill traps enabled but got unexpected sigill");
515 if (signal
== SIGTRAP
&& handle_allocation_trap(context
)) return;
517 #ifdef LISP_FEATURE_SB_THREAD
518 if (signal
== SIGTRAP
&& handle_tls_trap(context
, pc
, code
)) return;
521 if (code
== ((3 << 26) | (0x18 << 21) | (reg_NL3
<< 16))|| // TWI NE,$NL3,0
522 /* trap instruction from do_pending_interrupt */
523 code
== 0x7fe00008) { // TW T,0,0
524 arch_clear_pseudo_atomic_interrupted(get_sb_vm_thread());
525 arch_skip_instruction(context
);
526 /* interrupt or GC was requested in PA; now we're done with the
527 PA section we may as well get around to it */
528 interrupt_handle_pending(context
);
531 #ifdef LISP_FEATURE_64_BIT
532 /* TDI LGT,$NULL,code */
533 if ((code
>> 16) == ((2 << 10) | (1 << 5) | reg_NULL
)) {
534 int trap
= code
& 0xff;
535 handle_trap(context
,trap
);
539 if ((code
>> 16) == ((3 << 10) | (6 << 5))) {
540 /* twllei reg_ZERO,N will always trap if reg_ZERO = 0 */
541 int trap
= code
& 0xff;
542 handle_trap(context
,trap
);
547 /* twi :ne ... or twi ... nargs */
548 if (((code
>> 26) == 3) && (((code
>> 21) & 31) == 24
549 || ((code
>> 16) & 31) == reg_NARGS
551 interrupt_internal_error(context
, 0);
555 interrupt_handle_now(signal
, (siginfo_t
*)code
, context
);
559 void arch_install_interrupt_handlers()
561 ll_install_handler(SIGILL
, sigtrap_handler
);
562 ll_install_handler(SIGTRAP
, sigtrap_handler
);
566 ppc_flush_icache(os_vm_address_t address
, os_vm_size_t length
)
568 os_vm_address_t end
= PTR_ALIGN_UP(address
+length
, 32);
569 extern void ppc_flush_cache_line(os_vm_address_t
);
571 while (address
< end
) {
572 ppc_flush_cache_line(address
);
577 /* Linkage tables for PowerPC
579 * Linkage entry size is 16, because we need at least 4 instructions to
584 * Define the registers to use in the linkage jump table. Can be the
585 * same. Some care must be exercised when choosing these. It has to be
586 * a register that is not otherwise being used. reg_NFP is a good
587 * choice. call_into_c trashes reg_NFP without preserving it, so we can
588 * trash it in the linkage jump table.
590 #define LINKAGE_TEMP_REG reg_NFP
591 #define LINKAGE_ADDR_REG reg_NFP
594 * Insert the necessary jump instructions at the given address.
597 arch_write_linkage_table_entry(int index
, void *target_addr
, int datap
)
599 char *reloc_addr
= (char*)ALIEN_LINKAGE_SPACE_START
+ index
* ALIEN_LINKAGE_TABLE_ENTRY_SIZE
;
601 *(unsigned long *)reloc_addr
= (unsigned long)target_addr
;
605 #if defined LISP_FEATURE_64_BIT
606 extern long call_into_c
; // actually a function entry address,
607 // but trick the compiler into thinking it isn't, so that it does not
608 // indirect through a descriptor, but instead we get its logical address.
609 if (target_addr
!= &call_into_c
) {
610 #ifdef LISP_FEATURE_LITTLE_ENDIAN
612 unsigned long a0
,a16
,a32
,a48
;
615 inst_ptr
= (int*) reloc_addr
;
617 a48
= (unsigned long) target_addr
>> 48 & 0xFFFF;
618 a32
= (unsigned long) target_addr
>> 32 & 0xFFFF;
619 a16
= (unsigned long) target_addr
>> 16 & 0xFFFF;
620 a0
= (unsigned long) target_addr
& 0xFFFF;
623 /* addis 12, 0, a48 */
625 inst
= (15 << 26) | (12 << 21) | (0 << 16) | a48
;
628 /* ori 12, 12, a32 */
630 inst
= (24 << 26) | (12 << 21) | (12 << 16) | a32
;
633 /* sldi 12, 12, 32 */
634 inst
= (30 << 26) | (12 << 21) | (12 << 16) | 0x07C6;
637 /* oris 12, 12, a16 */
639 inst
= (25 << 26) | (12 << 21) | (12 << 16) | a16
;
644 inst
= (24 << 26) | (12 << 21) | (12 << 16) | a0
;
651 inst
= (31 << 26) | (12 << 21) | (9 << 16) | (467 << 1);
658 inst
= (19 << 26) | (20 << 21) | (528 << 1);
661 os_flush_icache((os_vm_address_t
) reloc_addr
, (char*) inst_ptr
- reloc_addr
);
664 // Could use either ABI, but we're assuming v1
665 * In the 64-bit v1 ABI, function pointers are always passed around
666 * as "function descriptors", not directly the jump target address.
667 * A descriptor is 3 words:
668 * word 0 = address to jump to
669 * word 1 = value to place in r2
670 * word 2 = value to place in r11
671 * For foreign calls, the value that we hand off to call_into_c
672 * is therefore a function descriptor. To make things consistent,
673 * this linkage table entry itself has to look like a function descriptor.
674 * We can just copy the real descriptor to here, except in one case:
675 * call_into_c is not itself an ABI-compatible call. It really should be
676 * a lisp assembly routine, but then we have a turtles-all-the-way-down problem:
677 * it's tricky to access C global data from lisp assembly routines.
679 memcpy(reloc_addr
, target_addr
, 24);
683 // Can't encode more than 32 bits of jump address
684 gc_assert(((unsigned long) target_addr
>> 32) == 0);
688 * Make JMP to function entry.
690 * The instruction sequence is:
692 * addis 13, 0, (hi part of addr)
693 * ori 13, 13, (low part of addr)
699 unsigned long hi
; /* Top 16 bits of address */
700 unsigned long lo
; /* Low 16 bits of address */
703 inst_ptr
= (int*) reloc_addr
;
705 hi
= (unsigned long) target_addr
;
710 * addis 13, 0, (hi part)
713 inst
= (15 << 26) | (LINKAGE_TEMP_REG
<< 21) | (0 << 16) | hi
;
717 * ori 13, 13, (lo part)
720 inst
= (24 << 26) | (LINKAGE_TEMP_REG
<< 21) | (LINKAGE_TEMP_REG
<< 16) | lo
;
727 inst
= (31 << 26) | (LINKAGE_TEMP_REG
<< 21) | (9 << 16) | (467 << 1);
734 inst
= (19 << 26) | (20 << 21) | (528 << 1);
737 os_flush_icache((os_vm_address_t
) reloc_addr
, (char*) inst_ptr
- reloc_addr
);
740 #ifdef LISP_FEATURE_64_BIT
/* Rewrite the M (mask-length) field of the rotate-and-mask instruction
 * at WHERE so that it encodes 64-NBITS, i.e. the instruction keeps the
 * low 64-nbits bits of its operand. */
void gcbarrier_patch_code(void* where, int nbits)
{
    int m_operand = 64 - nbits;
    /* The M field has a kooky split encoding: low five bits first,
     * then the sixth bit in the least-significant position. */
    int m_encoded = ((m_operand & 0x1F) << 1) | (m_operand >> 5);
    unsigned int* pc = where;
    /* Deposit the encoded value into bits 0x7E0 of the instruction,
     * i.e. (BYTE 6 5) of the instruction word. */
    unsigned int inst = *pc;
    *pc = (inst & ~0x7E0) | (m_encoded << 5);
}