boehm-gc/mach_dep.c
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, November 17, 1995 12:13 pm PST */
# include "private/gc_priv.h"
# include <stdio.h>
# include <setjmp.h>
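/* On OS/2 and CX/UX the _setjmp/_longjmp variants are apparently not  */
/* available, so they are mapped to the standard setjmp/longjmp below. */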
# if defined(OS2) || defined(CX_UX)
#   define _setjmp(b) setjmp(b)
#   define _longjmp(b,v) longjmp(b,v)
# endif
# ifdef AMIGA
#   ifndef __GNUC__
#     include <dos.h>
#   else
#     include <machine/reg.h>
#   endif
# endif
#if defined(__MWERKS__) && !defined(POWERPC)

asm static void PushMacRegisters()
{
    sub.w   #4,sp                   // reserve space for one parameter.
    move.l  a2,(sp)
    jsr     GC_push_one
    move.l  a3,(sp)
    jsr     GC_push_one
    move.l  a4,(sp)
    jsr     GC_push_one
#   if !__option(a6frames)
    // <pcb> perhaps a6 should be pushed if stack frames are not being used.
    move.l  a6,(sp)
    jsr     GC_push_one
#   endif
    // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
    move.l  d2,(sp)
    jsr     GC_push_one
    move.l  d3,(sp)
    jsr     GC_push_one
    move.l  d4,(sp)
    jsr     GC_push_one
    move.l  d5,(sp)
    jsr     GC_push_one
    move.l  d6,(sp)
    jsr     GC_push_one
    move.l  d7,(sp)
    jsr     GC_push_one
    add.w   #4,sp                   // fix stack.
    rts
}

#endif /* __MWERKS__ */
# if defined(SPARC) || defined(IA64)
    /* Value returned from register flushing routine; either sp (SPARC) */
    /* or ar.bsp (IA64).                                                 */
    word GC_save_regs_ret_val;
# endif

/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture.  There is a generic   */
/* version at the end, that is likely, but not guaranteed, to work      */
/* on your architecture.  Run the test_setjmp program to see whether    */
/* there is any chance it will work.                                    */
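/* Every architecture-specific block below hands each callee-save      */
/* register that might hold a heap pointer to GC_push_one, which       */
/* records the value if it appears to point into the collected heap.   */
/* As a never-compiled sketch of the idea (reg1/reg2 are hypothetical  */
/* stand-ins for whatever registers the ABI preserves across calls):   */
#if 0
    GC_push_one((word) reg1);
    GC_push_one((word) reg2);
    /* ... one call per preserved register ... */
#endif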
#ifndef USE_GENERIC_PUSH_REGS
void GC_push_regs()
{
#   ifdef RT
      register long TMP_SP; /* must be bound to r11 */
#   endif

#   ifdef VAX
      /* VAX - generic code below does not work under 4.2 */
      /* r1 through r5 are caller save, and therefore     */
      /* on the stack or dead.                            */
      asm("pushl r11"); asm("calls $1,_GC_push_one");
      asm("pushl r10"); asm("calls $1,_GC_push_one");
      asm("pushl r9");  asm("calls $1,_GC_push_one");
      asm("pushl r8");  asm("calls $1,_GC_push_one");
      asm("pushl r7");  asm("calls $1,_GC_push_one");
      asm("pushl r6");  asm("calls $1,_GC_push_one");
#   endif
#   if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
      /* M68K SUNOS - could be replaced by generic code */
      /* a0, a1 and d1 are caller save                  */
      /* and therefore are on stack or dead.            */
      asm("subqw #0x4,sp");         /* allocate word on top of stack */

      asm("movl a2,sp@"); asm("jbsr _GC_push_one");
      asm("movl a3,sp@"); asm("jbsr _GC_push_one");
      asm("movl a4,sp@"); asm("jbsr _GC_push_one");
      asm("movl a5,sp@"); asm("jbsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("movl d1,sp@"); asm("jbsr _GC_push_one");
      asm("movl d2,sp@"); asm("jbsr _GC_push_one");
      asm("movl d3,sp@"); asm("jbsr _GC_push_one");
      asm("movl d4,sp@"); asm("jbsr _GC_push_one");
      asm("movl d5,sp@"); asm("jbsr _GC_push_one");
      asm("movl d6,sp@"); asm("jbsr _GC_push_one");
      asm("movl d7,sp@"); asm("jbsr _GC_push_one");

      asm("addqw #0x4,sp");         /* put stack back where it was */
#   endif
#   if defined(M68K) && defined(HP)
      /* M68K HP - could be replaced by generic code */
      /* a0, a1 and d1 are caller save.              */
      asm("subq.w &0x4,%sp");       /* allocate word on top of stack */

      asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("mov.l %d1,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
      asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");

      asm("addq.w &0x4,%sp");       /* put stack back where it was */
#   endif /* M68K HP */
#   if defined(M68K) && defined(AMIGA)
      /* AMIGA - could be replaced by generic code */
      /* a0, a1, d0 and d1 are caller save         */

#     ifdef __GNUC__
        asm("subq.w &0x4,%sp");     /* allocate word on top of stack */

        asm("mov.l %a2,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a3,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a4,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a5,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %a6,(%sp)"); asm("jsr _GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("mov.l %d2,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d3,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d4,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d5,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d6,(%sp)"); asm("jsr _GC_push_one");
        asm("mov.l %d7,(%sp)"); asm("jsr _GC_push_one");

        asm("addq.w &0x4,%sp");     /* put stack back where it was */
#     else /* !__GNUC__ */
        GC_push_one(getreg(REG_A2));
        GC_push_one(getreg(REG_A3));
#       ifndef __SASC
          /* Can probably be changed to #if 0 -Kjetil M. (a4=globals) */
          GC_push_one(getreg(REG_A4));
#       endif
        GC_push_one(getreg(REG_A5));
        GC_push_one(getreg(REG_A6));
        /* Skip stack pointer */
        GC_push_one(getreg(REG_D2));
        GC_push_one(getreg(REG_D3));
        GC_push_one(getreg(REG_D4));
        GC_push_one(getreg(REG_D5));
        GC_push_one(getreg(REG_D6));
        GC_push_one(getreg(REG_D7));
#     endif /* !__GNUC__ */
#   endif /* AMIGA */
#   if defined(M68K) && defined(MACOS)
#     if defined(THINK_C)
#       define PushMacReg(reg) \
                move.l  reg,(sp) \
                jsr     GC_push_one
        asm {
                sub.w   #4,sp          ; reserve space for one parameter.
                PushMacReg(a2);
                PushMacReg(a3);
                PushMacReg(a4);
                ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
                PushMacReg(d2);
                PushMacReg(d3);
                PushMacReg(d4);
                PushMacReg(d5);
                PushMacReg(d6);
                PushMacReg(d7);
                add.w   #4,sp          ; fix stack.
        }
#       undef PushMacReg
#     endif /* THINK_C */
#     if defined(__MWERKS__)
        PushMacRegisters();
#     endif /* __MWERKS__ */
#   endif /* MACOS */
#   if defined(I386) && !defined(OS2) && !defined(SVR4) \
       && (defined(__MINGW32__) || !defined(MSWIN32)) \
       && !defined(SCO) && !defined(SCO_ELF) \
       && !(defined(LINUX) && defined(__ELF__)) \
       && !(defined(FREEBSD) && defined(__ELF__)) \
       && !(defined(NETBSD) && defined(__ELF__)) \
       && !(defined(OPENBSD) && defined(__ELF__)) \
       && !(defined(BEOS) && defined(__ELF__)) \
       && !defined(DOS4GW) && !defined(HURD)
      /* I386 code; the generic code does not appear to work here.  */
      /* It does appear to work under OS2, where asms don't.        */
      /* This is used for some 386 UNIX variants and for CYGWIN32.  */
      asm("pushl %eax"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call _GC_push_one"); asm("addl $4,%esp");
#   endif
#   if ( defined(I386) && defined(LINUX) && defined(__ELF__) ) \
     || ( defined(I386) && defined(FREEBSD) && defined(__ELF__) ) \
     || ( defined(I386) && defined(NETBSD) && defined(__ELF__) ) \
     || ( defined(I386) && defined(OPENBSD) && defined(__ELF__) ) \
     || ( defined(I386) && defined(HURD) && defined(__ELF__) )

      /* This is modified for Linux with ELF (Note: _ELF_ only).    */
      /* This section handles FreeBSD with ELF.                     */
      /* Eax is caller-save and dead here.  Other caller-save       */
      /* registers could also be skipped.  We assume there are no   */
      /* pointers in MMX registers, etc.                            */
      /* We combine instructions in a single asm to prevent gcc     */
      /* from inserting code in the middle.                         */
      asm("pushl %ecx; call GC_push_one; addl $4,%esp");
      asm("pushl %edx; call GC_push_one; addl $4,%esp");
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#   endif
#   if ( defined(I386) && defined(BEOS) && defined(__ELF__) )
      /* As far as I can understand from                      */
      /* http://www.beunited.org/articles/jbq/nasm.shtml,     */
      /* only ebp, esi, edi and ebx are not scratch.  How MMX */
      /* etc. registers should be treated, I have no idea.    */
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#   endif
#   if defined(I386) && defined(MSWIN32) && !defined(__MINGW32__) \
       && !defined(USE_GENERIC)
      /* I386 code, Microsoft variant */
      __asm  push eax
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ebx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ecx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push edx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ebp
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push esi
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push edi
      __asm  call GC_push_one
      __asm  add esp,4
#   endif
#   if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
      /* I386 code, SVR4 variant, generic code does not appear to work */
      asm("pushl %eax"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi"); asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi"); asm("call GC_push_one"); asm("addl $4,%esp");
#   endif
#   ifdef NS32K
      asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
#   endif
#   if defined(SPARC)
      {
        word GC_save_regs_in_stack();

        GC_save_regs_ret_val = GC_save_regs_in_stack();
      }
#   endif
#   ifdef RT
      GC_push_one(TMP_SP);    /* GC_push_one from r11 */

      asm("cas r11, r6, r0");  GC_push_one(TMP_SP);   /* r6      */
      asm("cas r11, r7, r0");  GC_push_one(TMP_SP);   /* through */
      asm("cas r11, r8, r0");  GC_push_one(TMP_SP);   /* r10     */
      asm("cas r11, r9, r0");  GC_push_one(TMP_SP);
      asm("cas r11, r10, r0"); GC_push_one(TMP_SP);

      asm("cas r11, r12, r0"); GC_push_one(TMP_SP);   /* r12     */
      asm("cas r11, r13, r0"); GC_push_one(TMP_SP);   /* through */
      asm("cas r11, r14, r0"); GC_push_one(TMP_SP);   /* r15     */
      asm("cas r11, r15, r0"); GC_push_one(TMP_SP);
#   endif
#   if defined(M68K) && defined(SYSV)
      /* Once again similar to SUN and HP, though setjmp appears to work.
                --Parag
       */
#     ifdef __GNUC__
        asm("subqw #0x4,%sp");      /* allocate word on top of stack */

        asm("movl %a2,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a3,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a4,%sp@"); asm("jbsr GC_push_one");
        asm("movl %a5,%sp@"); asm("jbsr GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("movl %d1,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d2,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d3,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d4,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d5,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d6,%sp@"); asm("jbsr GC_push_one");
        asm("movl %d7,%sp@"); asm("jbsr GC_push_one");

        asm("addqw #0x4,%sp");      /* put stack back where it was */
#     else /* !__GNUC__ */
        asm("subq.w &0x4,%sp");     /* allocate word on top of stack */

        asm("mov.l %a2,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a3,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a4,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %a5,(%sp)"); asm("jsr GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("mov.l %d1,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d2,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d3,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d4,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d5,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d6,(%sp)"); asm("jsr GC_push_one");
        asm("mov.l %d7,(%sp)"); asm("jsr GC_push_one");

        asm("addq.w &0x4,%sp");     /* put stack back where it was */
#     endif /* !__GNUC__ */
#   endif /* M68K/SYSV */
#   if defined(PJ)
      {
        register int * sp asm ("optop");
        extern int *__libc_stack_end;

        GC_push_all_stack (sp, __libc_stack_end);
      }
#   endif
    /* other machines... */
#   if !defined(M68K) && !defined(VAX) && !defined(RT)
#     if !defined(SPARC) && !defined(I386) && !defined(NS32K)
#       if !defined(POWERPC) && !defined(UTS4)
#         if !defined(PJ) && !(defined(MIPS) && defined(LINUX))
            --> bad news <--
#         endif
#       endif
#     endif
#   endif
}

#endif /* !USE_GENERIC_PUSH_REGS */
#if defined(USE_GENERIC_PUSH_REGS)
void GC_generic_push_regs(cold_gc_frame)
ptr_t cold_gc_frame;
{
#   ifdef HAVE_BUILTIN_UNWIND_INIT
      /* This was suggested by Richard Henderson as the way to */
      /* force callee-save registers and register windows onto */
      /* the stack.                                             */
      __builtin_unwind_init();
#   else /* !HAVE_BUILTIN_UNWIND_INIT */
      /* Generic code                          */
      /* The idea is due to Parag Patel at HP. */
      /* We're not sure whether he would like  */
      /* to be acknowledged for it or not.     */
      jmp_buf regs;
      register word * i = (word *) regs;
      register ptr_t lim = (ptr_t)(regs) + (sizeof regs);

      /* Setjmp doesn't always clear all of the buffer.  */
      /* That tends to preserve garbage.  Clear it.       */
      for (; (char *)i < lim; i++) {
          *i = 0;
      }
#     if defined(POWERPC) || defined(MSWIN32) || defined(MSWINCE) \
         || defined(UTS4) || defined(LINUX) || defined(EWS4800)
        (void) setjmp(regs);
#     else
        (void) _setjmp(regs);
#     endif
#   endif /* !HAVE_BUILTIN_UNWIND_INIT */
#   if (defined(SPARC) && !defined(HAVE_BUILTIN_UNWIND_INIT)) \
       || defined(IA64)
      /* On a register window machine, we need to save register  */
      /* contents on the stack for this to work.  The setjmp     */
      /* is probably not needed on SPARC, since pointers are     */
      /* only stored in windowed or scratch registers.  It is    */
      /* needed on IA64, since some non-windowed registers are   */
      /* preserved.                                              */
      {
        word GC_save_regs_in_stack();

        GC_save_regs_ret_val = GC_save_regs_in_stack();
        /* On IA64 gcc, could use __builtin_ia64_flushrs() and   */
        /* __builtin_ia64_bsp(); the flush will be done          */
        /* implicitly by __builtin_unwind_init() for gcc 3.0.1   */
        /* and later.                                            */
      }
#   endif
    GC_push_current_stack(cold_gc_frame);
}
#endif /* USE_GENERIC_PUSH_REGS */
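/* A minimal, self-contained sketch of the setjmp trick used above,    */
/* kept under "#if 0" so it is never compiled.  "scan_range" is a      */
/* hypothetical stand-in for the collector's stack-scanning routine.   */
#if 0
#include <setjmp.h>

static void scan_range(char *lo, char *hi);   /* hypothetical scanner  */

static void push_regs_via_setjmp(void)
{
    jmp_buf regs;                    /* buffer lives on the stack      */

    /* setjmp spills the callee-save registers into the buffer, so a   */
    /* conservative scan of the buffer sees their current contents.    */
    (void) setjmp(regs);
    scan_range((char *)regs, (char *)regs + sizeof(regs));
}
#endif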
/* On register window machines, we need a way to force registers into */
/* the stack.  Return sp.                                              */
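/* On 32-bit SPARC this is done with the ST_FLUSH_WINDOWS trap        */
/* (ta 0x3); on SPARC V9 the flushw instruction is used instead, and  */
/* the 2047 offsets below account for the V9 stack bias.              */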
# ifdef SPARC
    asm("	.seg	\"text\"");
#   ifdef SVR4
      asm("	.globl	GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm("	.type GC_save_regs_in_stack,#function");
#   else
      asm("	.globl	_GC_save_regs_in_stack");
      asm("_GC_save_regs_in_stack:");
#   endif
#   if defined(__arch64__) || defined(__sparcv9)
      asm("	save	%sp,-128,%sp");
      asm("	flushw");
      asm("	ret");
      asm("	restore %sp,2047+128,%o0");
#   else
      asm("	ta	0x3   ! ST_FLUSH_WINDOWS");
      asm("	retl");
      asm("	mov	%sp,%o0");
#   endif
#   ifdef SVR4
      asm("	.GC_save_regs_in_stack_end:");
      asm("	.size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
#   endif
#   ifdef LINT
      word GC_save_regs_in_stack() { return(0 /* sp really */); }
#   endif
# endif
/* On IA64, we also need to flush register windows.  But they end     */
/* up on the other side of the stack segment.                         */
/* Returns the backing store pointer for the register stack.          */
/* We now implement this as a separate assembly file, since inline    */
/* assembly code here doesn't work with either the Intel or HP        */
/* compilers.                                                          */
# if 0
#   ifdef LINUX
      asm("        .text");
      asm("        .psr abi64");
      asm("        .psr lsb");
      asm("        .lsb");
      asm("");
      asm("        .text");
      asm("        .align 16");
      asm("        .global GC_save_regs_in_stack");
      asm("        .proc GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm("        .body");
      asm("        flushrs");
      asm("        ;;");
      asm("        mov r8=ar.bsp");
      asm("        br.ret.sptk.few rp");
      asm("        .endp GC_save_regs_in_stack");
#   endif /* LINUX */
#   if 0 /* Other alternatives that don't work on HP/UX */
      word GC_save_regs_in_stack() {
#       if USE_BUILTINS
          __builtin_ia64_flushrs();
          return __builtin_ia64_bsp();
#       else
#         ifdef HPUX
            _asm("        flushrs");
            _asm("        ;;");
            _asm("        mov r8=ar.bsp");
            _asm("        br.ret.sptk.few rp");
#         else
            asm("        flushrs");
            asm("        ;;");
            asm("        mov r8=ar.bsp");
            asm("        br.ret.sptk.few rp");
#         endif
#       endif
      }
#   endif
# endif
/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
/* returns arg.  Stack clearing is crucial on SPARC, so we supply     */
/* an assembly version that's more careful.  Assumes limit is hotter  */
/* than sp, and limit is 8-byte aligned.                              */
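/* In rough C terms, the assembly below amounts to                    */
/*     for (p = sp - 8; p > limit; p -= 8) *(long long *)p = 0;       */
/* with sp itself first moved below limit so that traps, which save   */
/* state below sp, keep working while the region is cleared.          */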
#if defined(ASM_CLEAR_CODE)
#ifndef SPARC
	--> fix it
#endif
# ifdef SUNOS4
    asm(".globl _GC_clear_stack_inner");
    asm("_GC_clear_stack_inner:");
# else
    asm(".globl GC_clear_stack_inner");
    asm("GC_clear_stack_inner:");
    asm(".type GC_clear_stack_inner,#function");
# endif
#if defined(__arch64__) || defined(__sparcv9)
    asm("mov %sp,%o2");             /* Save sp                    */
    asm("add %sp,2047-8,%o3");      /* p = sp+bias-8              */
    asm("add %o1,-2047-192,%sp");   /* Move sp out of the way,    */
                                    /* so that traps still work.  */
                                    /* Includes some extra words  */
                                    /* so we can be sloppy below. */
    asm("loop:");
    asm("stx %g0,[%o3]");           /* *(long *)p = 0             */
    asm("cmp %o3,%o1");
    asm("bgu,pt %xcc, loop");       /* if (p > limit) goto loop   */
    asm("add %o3,-8,%o3");          /* p -= 8 (delay slot)        */
    asm("retl");
    asm("mov %o2,%sp");             /* Restore sp., delay slot    */
#else
    asm("mov %sp,%o2");             /* Save sp                    */
    asm("add %sp,-8,%o3");          /* p = sp-8                   */
    asm("clr %g1");                 /* [g0,g1] = 0                */
    asm("add %o1,-0x60,%sp");       /* Move sp out of the way,    */
                                    /* so that traps still work.  */
                                    /* Includes some extra words  */
                                    /* so we can be sloppy below. */
    asm("loop:");
    asm("std %g0,[%o3]");           /* *(long long *)p = 0        */
    asm("cmp %o3,%o1");
    asm("bgu loop");                /* if (p > limit) goto loop   */
    asm("add %o3,-8,%o3");          /* p -= 8 (delay slot)        */
    asm("retl");
    asm("mov %o2,%sp");             /* Restore sp., delay slot    */
#endif /* old SPARC */
    /* First argument = %o0 = return value */
#   ifdef SVR4
      asm("	.GC_clear_stack_inner_end:");
      asm("	.size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
#   endif

# ifdef LINT
    /*ARGSUSED*/
    ptr_t GC_clear_stack_inner(arg, limit)
    ptr_t arg; word limit;
    { return(arg); }
# endif
#endif