/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, November 17, 1995 12:13 pm PST */
# include "private/gc_priv.h"

# if defined(OS2) || defined(CX_UX)
#   define _setjmp(b) setjmp(b)
#   define _longjmp(b,v) longjmp(b,v)

# include <machine/reg.h>
#if defined(__MWERKS__) && !defined(POWERPC)

asm static void PushMacRegisters()
    sub.w #4,sp // reserve space for one parameter.
# if !__option(a6frames)
    // <pcb> perhaps a6 should be pushed if stack frames are not being used.
    // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
    add.w #4,sp // fix stack.
#endif /* __MWERKS__ */
# if defined(SPARC) || defined(IA64)
/* Value returned from register flushing routine; either sp (SPARC) */
/* or ar.bsp (IA64). */
word GC_save_regs_ret_val;
/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture. There is a generic    */
/* version at the end that is likely, but not guaranteed, to work on    */
/* your architecture. Run the test_setjmp program to see whether there  */
/* is any chance it will work. */

#ifndef USE_GENERIC_PUSH_REGS
register long TMP_SP; /* must be bound to r11 */
# if defined(MIPS) && defined(LINUX)
/* I'm not sure whether this has actually been tested. */
# define call_push(x) asm("move $4," x ";"); asm("jal GC_push_one")
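/* Hypothetical usage, for illustration only: call_push("$16") would copy */
/* MIPS callee-saved register $16 (s0) into the argument register $4 (a0) */
/* and then call GC_push_one on it.                                       */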
# endif /* MIPS && LINUX */
/* VAX - generic code below does not work under 4.2 */
/* r1 through r5 are caller save, and therefore */
/* on the stack or dead. */
asm("pushl r11"); asm("calls $1,_GC_push_one");
asm("pushl r10"); asm("calls $1,_GC_push_one");
asm("pushl r9"); asm("calls $1,_GC_push_one");
asm("pushl r8"); asm("calls $1,_GC_push_one");
asm("pushl r7"); asm("calls $1,_GC_push_one");
asm("pushl r6"); asm("calls $1,_GC_push_one");
# if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
/* M68K SUNOS - could be replaced by generic code */
/* a0, a1 and d1 are caller save */
/* and therefore are on stack or dead. */
asm("subqw #0x4,sp"); /* allocate word on top of stack */
asm("movl a2,sp@"); asm("jbsr _GC_push_one");
asm("movl a3,sp@"); asm("jbsr _GC_push_one");
asm("movl a4,sp@"); asm("jbsr _GC_push_one");
asm("movl a5,sp@"); asm("jbsr _GC_push_one");
/* Skip frame pointer and stack pointer */
asm("movl d1,sp@"); asm("jbsr _GC_push_one");
asm("movl d2,sp@"); asm("jbsr _GC_push_one");
asm("movl d3,sp@"); asm("jbsr _GC_push_one");
asm("movl d4,sp@"); asm("jbsr _GC_push_one");
asm("movl d5,sp@"); asm("jbsr _GC_push_one");
asm("movl d6,sp@"); asm("jbsr _GC_push_one");
asm("movl d7,sp@"); asm("jbsr _GC_push_one");
asm("addqw #0x4,sp"); /* put stack back where it was */
# if defined(M68K) && defined(HP)
/* M68K HP - could be replaced by generic code */
/* a0, a1 and d1 are caller save. */
asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
/* Skip frame pointer and stack pointer */
asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# endif /* M68K HP */
# if defined(M68K) && defined(AMIGA)
/* AMIGA - could be replaced by generic code */
/* a0, a1, d0 and d1 are caller save */
asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
/* Skip frame pointer and stack pointer */
asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");
asm("addq.w &0x4,%sp"); /* put stack back where it was */
# else /* !__GNUC__ */
GC_push_one(getreg(REG_A2));
GC_push_one(getreg(REG_A3));
/* Can probably be changed to #if 0 -Kjetil M. (a4=globals) */
GC_push_one(getreg(REG_A4));
GC_push_one(getreg(REG_A5));
GC_push_one(getreg(REG_A6));
/* Skip stack pointer */
GC_push_one(getreg(REG_D2));
GC_push_one(getreg(REG_D3));
GC_push_one(getreg(REG_D4));
GC_push_one(getreg(REG_D5));
GC_push_one(getreg(REG_D6));
GC_push_one(getreg(REG_D7));
# endif /* !__GNUC__ */
# if defined(M68K) && defined(MACOS)
# if defined(THINK_C)
# define PushMacReg(reg) \
    sub.w #4,sp ; reserve space for one parameter.
    ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
    add.w #4,sp ; fix stack.
# endif /* THINK_C */
# if defined(__MWERKS__)
# endif /* __MWERKS__ */
# if defined(I386) && !defined(OS2) && !defined(SVR4) \
    && (defined(__MINGW32__) || !defined(MSWIN32)) \
    && !defined(SCO) && !defined(SCO_ELF) \
    && !(defined(LINUX) && defined(__ELF__)) \
    && !(defined(FREEBSD) && defined(__ELF__)) \
    && !(defined(NETBSD) && defined(__ELF__)) \
    && !(defined(OPENBSD) && defined(__ELF__)) \
    && !(defined(BEOS) && defined(__ELF__)) \
    && !defined(DOS4GW) && !defined(HURD)
/* I386 code; the generic code does not appear to work. */
/* It does appear to work under OS2, and the asms don't. */
/* This is used for some 386 UNIX variants and for CYGWIN32. */
asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebp"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
# if ( defined(I386) && defined(LINUX) && defined(__ELF__) ) \
    || ( defined(I386) && defined(FREEBSD) && defined(__ELF__) ) \
    || ( defined(I386) && defined(NETBSD) && defined(__ELF__) ) \
    || ( defined(I386) && defined(OPENBSD) && defined(__ELF__) ) \
    || ( defined(I386) && defined(HURD) && defined(__ELF__) )
/* This is modified for Linux with ELF (Note: _ELF_ only) */
/* This section handles FreeBSD with ELF. */
/* Eax is caller-save and dead here. Other caller-save */
/* registers could also be skipped. We assume there are no */
/* pointers in MMX registers, etc. */
/* We combine instructions in a single asm to prevent gcc from */
/* inserting code in the middle. */
asm("pushl %ecx; call GC_push_one; addl $4,%esp");
asm("pushl %edx; call GC_push_one; addl $4,%esp");
asm("pushl %ebp; call GC_push_one; addl $4,%esp");
asm("pushl %esi; call GC_push_one; addl $4,%esp");
asm("pushl %edi; call GC_push_one; addl $4,%esp");
asm("pushl %ebx; call GC_push_one; addl $4,%esp");
# if ( defined(I386) && defined(BEOS) && defined(__ELF__) )
/* As far as I can understand from */
/* http://www.beunited.org/articles/jbq/nasm.shtml, */
/* only ebp, esi, edi and ebx are not scratch. How MMX */
/* etc. registers should be treated, I have no idea. */
asm("pushl %ebp; call GC_push_one; addl $4,%esp");
asm("pushl %esi; call GC_push_one; addl $4,%esp");
asm("pushl %edi; call GC_push_one; addl $4,%esp");
asm("pushl %ebx; call GC_push_one; addl $4,%esp");
# if defined(I386) && defined(MSWIN32) && !defined(__MINGW32__) \
    && !defined(USE_GENERIC)
/* I386 code, Microsoft variant */
__asm call GC_push_one
__asm call GC_push_one
__asm call GC_push_one
__asm call GC_push_one
__asm call GC_push_one
__asm call GC_push_one
__asm call GC_push_one
# if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
/* I386 code, SVR4 variant; generic code does not appear to work */
asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
314 asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
315 asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
316 asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
317 asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
318 asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
word GC_save_regs_in_stack();
GC_save_regs_ret_val = GC_save_regs_in_stack();
GC_push_one(TMP_SP); /* GC_push_one from r11 */
asm("cas r11, r6, r0"); GC_push_one(TMP_SP); /* r6 */
asm("cas r11, r7, r0"); GC_push_one(TMP_SP); /* through */
asm("cas r11, r8, r0"); GC_push_one(TMP_SP); /* r10 */
asm("cas r11, r9, r0"); GC_push_one(TMP_SP);
asm("cas r11, r10, r0"); GC_push_one(TMP_SP);
asm("cas r11, r12, r0"); GC_push_one(TMP_SP); /* r12 */
asm("cas r11, r13, r0"); GC_push_one(TMP_SP); /* through */
asm("cas r11, r14, r0"); GC_push_one(TMP_SP); /* r15 */
asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
# if defined(M68K) && defined(SYSV)
/* Once again similar to SUN and HP, though setjmp appears to work. */
349 asm("subqw #0x4,%sp"); /* allocate word on top of stack */
351 asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
352 asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
353 asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
354 asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
355 /* Skip frame pointer and stack pointer */
356 asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
357 asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
358 asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
359 asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
360 asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
361 asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
362 asm("movl %d7,%sp@"); asm("jbsr GC_push_one");
364 asm("addqw #0x4,%sp"); /* put stack back where it was */
365 # else /* !__GNUC__*/
366 asm("subq.w &0x4,%sp"); /* allocate word on top of stack */
368 asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
369 asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
370 asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
371 asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
372 /* Skip frame pointer and stack pointer */
373 asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
374 asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
375 asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
376 asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
377 asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
378 asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
379 asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");
381 asm("addq.w &0x4,%sp"); /* put stack back where it was */
382 # endif /* !__GNUC__ */
383 # endif /* M68K/SYSV */
register int * sp asm ("optop");
extern int *__libc_stack_end;
GC_push_all_stack (sp, __libc_stack_end);
/* other machines... */
# if !defined(M68K) && !defined(VAX) && !defined(RT)
# if !defined(SPARC) && !defined(I386) && !defined(NS32K)
# if !defined(POWERPC) && !defined(UTS4)
# if !defined(PJ) && !(defined(MIPS) && defined(LINUX))
#endif /* !USE_GENERIC_PUSH_REGS */
#if defined(USE_GENERIC_PUSH_REGS)
void GC_generic_push_regs(cold_gc_frame)
# ifdef HAVE_BUILTIN_UNWIND_INIT
/* This was suggested by Richard Henderson as the way to */
/* force callee-save registers and register windows onto */
/* the stack. */
__builtin_unwind_init();
# else /* !HAVE_BUILTIN_UNWIND_INIT */
/* The idea is due to Parag Patel at HP. */
/* We're not sure whether he would like */
/* to be acknowledged for it or not. */
register word * i = (word *) regs;
register ptr_t lim = (ptr_t)(regs) + (sizeof regs);
/* Setjmp doesn't always clear all of the buffer. */
/* That tends to preserve garbage. Clear it. */
for (; (char *)i < lim; i++) {
    *i = 0;
}
# if defined(POWERPC) || defined(MSWIN32) || defined(MSWINCE) \
    || defined(UTS4) || defined(LINUX)
(void) _setjmp(regs);
# endif /* !HAVE_BUILTIN_UNWIND_INIT */
# if (defined(SPARC) && !defined(HAVE_BUILTIN_UNWIND_INIT)) \
    || defined(IA64)
/* On a register window machine, we need to save register */
/* contents on the stack for this to work. The setjmp */
/* is probably not needed on SPARC, since pointers are */
/* only stored in windowed or scratch registers. It is */
/* needed on IA64, since some non-windowed registers are */
/* preserved. */
word GC_save_regs_in_stack();
GC_save_regs_ret_val = GC_save_regs_in_stack();
/* On IA64 gcc, one could use __builtin_ia64_bsp() and */
/* __builtin_ia64_flushrs(). The latter will be done */
/* implicitly by __builtin_unwind_init() for gcc 3.0.1 and later. */
GC_push_current_stack(cold_gc_frame);
#endif /* USE_GENERIC_PUSH_REGS */
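/* Illustrative sketch only (not part of the collector): the generic        */
/* approach above amounts to spilling callee-save registers into a jmp_buf  */
/* via setjmp and then scanning that buffer for potential roots. The helper */
/* name scan_word below is hypothetical and merely stands in for            */
/* GC_push_one-style processing. Kept under "#if 0" so it is never built.   */
#if 0
#include <setjmp.h>
#include <string.h>

static void scan_word(void *p)          /* hypothetical root callback */
{
    (void)p;                            /* a real collector would test and mark p */
}

static void sketch_push_regs(void)
{
    jmp_buf regs;
    size_t i;

    /* setjmp need not fill the whole buffer; clear it so stale garbage */
    /* is not treated as a root.                                        */
    memset(&regs, 0, sizeof regs);
    if (setjmp(regs) == 0) {
        /* Callee-save registers are now (typically) captured in regs;  */
        /* scan the buffer word by word.                                */
        for (i = 0; i + sizeof(void *) <= sizeof(regs); i += sizeof(void *)) {
            void *w;
            memcpy(&w, (char *)&regs + i, sizeof w);
            scan_word(w);
        }
    }
}
#endif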
/* On register window machines, we need a way to force registers into */
/* the stack. Return sp. */
asm(" .seg \"text\"");
asm(" .globl GC_save_regs_in_stack");
asm("GC_save_regs_in_stack:");
asm(" .type GC_save_regs_in_stack,#function");
asm(" .globl _GC_save_regs_in_stack");
asm("_GC_save_regs_in_stack:");
# if defined(__arch64__) || defined(__sparcv9)
asm(" save %sp,-128,%sp");
asm(" restore %sp,2047+128,%o0");
asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
asm(" .GC_save_regs_in_stack_end:");
asm(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
word GC_save_regs_in_stack() { return(0 /* sp really */); }
/* On IA64, we also need to flush register windows. But they end */
/* up on the other side of the stack segment. */
/* Returns the backing store pointer for the register stack. */
/* We implement this as a separate file in HP/UX. */
asm(" .global GC_save_regs_in_stack");
asm(" .proc GC_save_regs_in_stack");
asm("GC_save_regs_in_stack:");
asm(" mov r8=ar.bsp");
asm(" br.ret.sptk.few rp");
asm(" .endp GC_save_regs_in_stack");
# if 0 /* Other alternatives that don't work on HP/UX */
word GC_save_regs_in_stack() {
__builtin_ia64_flushrs();
return __builtin_ia64_bsp();
_asm(" mov r8=ar.bsp");
_asm(" br.ret.sptk.few rp");
asm(" mov r8=ar.bsp");
asm(" br.ret.sptk.few rp");
/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
/* returns arg. Stack clearing is crucial on SPARC, so we supply */
/* an assembly version that's more careful. Assumes limit is hotter */
/* than sp, and limit is 8 byte aligned. */
#if defined(ASM_CLEAR_CODE)
asm(".globl _GC_clear_stack_inner");
asm("_GC_clear_stack_inner:");
asm(".globl GC_clear_stack_inner");
asm("GC_clear_stack_inner:");
551 asm(".type GC_save_regs_in_stack,#function");
#if defined(__arch64__) || defined(__sparcv9)
asm("mov %sp,%o2");           /* Save sp */
asm("add %sp,2047-8,%o3");    /* p = sp+bias-8 */
asm("add %o1,-2047-192,%sp"); /* Move sp out of the way, */
                              /* so that traps still work. */
                              /* Includes some extra words */
                              /* so we can be sloppy below. */
asm("stx %g0,[%o3]");         /* *(long *)p = 0 */
asm("bgu,pt %xcc, loop");     /* if (p > limit) goto loop */
asm("add %o3,-8,%o3");        /* p -= 8 (delay slot) */
asm("mov %o2,%sp");           /* Restore sp., delay slot */
asm("mov %sp,%o2");           /* Save sp */
asm("add %sp,-8,%o3");        /* p = sp-8 */
asm("clr %g1");               /* [g0,g1] = 0 */
asm("add %o1,-0x60,%sp");     /* Move sp out of the way, */
                              /* so that traps still work. */
                              /* Includes some extra words */
                              /* so we can be sloppy below. */
asm("std %g0,[%o3]");         /* *(long long *)p = 0 */
asm("bgu loop ");             /* if (p > limit) goto loop */
asm("add %o3,-8,%o3");        /* p -= 8 (delay slot) */
asm("mov %o2,%sp");           /* Restore sp., delay slot */
#endif /* old SPARC */
/* First argument = %o0 = return value */
asm(" .GC_clear_stack_inner_end:");
asm(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
ptr_t GC_clear_stack_inner(arg, limit)
ptr_t arg; word limit;