/*
 * NOTE: web-listing residue preserved for provenance:
 * repository: mono-project/dkf.git — mono/mini/exceptions-alpha.c
 * blob 24ceaa5d5a233043ad45d4b69ac14a680a4daff9
 */
/*------------------------------------------------------------------*/
/*                                                                  */
/* Name        - exceptions-alpha.c                                 */
/*                                                                  */
/* Function    - Exception support for Alpha.                       */
/*                                                                  */
/* Name        - Sergey Tikhonov (tsv@solvo.ru)                     */
/*                                                                  */
/* Date        - January, 2006                                      */
/*                                                                  */
/* Derivation  - From exceptions-amd64 & exceptions-ia64            */
/*               Paolo Molaro (lupus@ximian.com)                    */
/*               Dietmar Maurer (dietmar@ximian.com)                */
/*               Zoltan Varga (vargaz@gmail.com)                    */
/*                                                                  */
/*------------------------------------------------------------------*/

/*------------------------------------------------------------------*/
/*                 D e f i n e s                                    */
/*------------------------------------------------------------------*/

/* Trace a function entry when the runtime runs in verbose mode.
 * Wrapped in do/while(0) so the macro behaves as a single statement
 * inside if/else bodies (the bare-if form swallowed a following else). */
#define ALPHA_DEBUG(x) \
	do { \
		if (mini_alpha_verbose_level) \
			g_debug ("ALPHA_DEBUG: %s is called.", x); \
	} while (0)

/* Statement prefix: the following statement runs only in verbose mode. */
#define ALPHA_PRINT if (mini_alpha_verbose_level)

/* Code-buffer size reserved for the throw-by-name trampoline stub. */
#define SZ_THROW	384

extern int mini_alpha_verbose_level;

/*========================= End of Defines =========================*/
35 /*------------------------------------------------------------------*/
36 /* I n c l u d e s */
37 /*------------------------------------------------------------------*/
39 #include <config.h>
40 #include <glib.h>
41 #include <signal.h>
42 #include <string.h>
43 #include <ucontext.h>
45 #include <mono/arch/alpha/alpha-codegen.h>
46 #include <mono/metadata/appdomain.h>
47 #include <mono/metadata/tabledefs.h>
48 #include <mono/metadata/threads.h>
49 #include <mono/metadata/debug-helpers.h>
50 #include <mono/metadata/exception.h>
51 #include <mono/metadata/mono-debug.h>
53 #include "mini.h"
54 #include "mini-alpha.h"
56 /*========================= End of Includes ========================*/
58 /*------------------------------------------------------------------*/
59 /* */
60 /* Name - mono_arch_get_call_filter */
61 /* */
62 /* Function - Return a pointer to a method which calls an */
63 /* exception filter. We also use this function to */
64 /* call finally handlers (we pass NULL as @exc */
65 /* object in this case). */
66 /* */
67 /*------------------------------------------------------------------*/
69 gpointer
70 mono_arch_get_call_filter (void)
72 static gboolean inited = FALSE;
73 static unsigned int *start_code;
74 unsigned int *code;
76 ALPHA_DEBUG("mono_arch_get_call_filter");
78 if (inited)
79 return start_code;
81 start_code = code = mono_global_codeman_reserve (128 * 4);
83 /* call_filter (MonoContext *ctx, unsigned long eip) */
84 code = start_code;
86 alpha_ldah( code, alpha_gp, alpha_pv, 0 );
87 alpha_lda( code, alpha_gp, alpha_gp, 0 ); // ldgp gp, 0(pv)
89 /* store call convention parameters on stack */
90 alpha_lda(code, alpha_sp, alpha_sp, -(8*25)); // Save 22 regs + RA, FP
91 alpha_stq(code, alpha_ra, alpha_sp, 0);
92 alpha_stq(code, alpha_fp, alpha_sp, 8);
94 /* set the frame pointer */
95 alpha_mov1( code, alpha_sp, alpha_fp );
97 /* Save registers */
98 alpha_stq(code, alpha_r1, alpha_fp, (16+(8*0)));
99 alpha_stq(code, alpha_r2, alpha_fp, (16+(8*1)));
100 alpha_stq(code, alpha_r3, alpha_fp, (16+(8*2)));
101 alpha_stq(code, alpha_r4, alpha_fp, (16+(8*3)));
102 alpha_stq(code, alpha_r5, alpha_fp, (16+(8*4)));
103 alpha_stq(code, alpha_r6, alpha_fp, (16+(8*5)));
104 alpha_stq(code, alpha_r7, alpha_fp, (16+(8*6)));
105 alpha_stq(code, alpha_r8, alpha_fp, (16+(8*7)));
106 alpha_stq(code, alpha_r9, alpha_fp, (16+(8*8)));
107 alpha_stq(code, alpha_r10, alpha_fp, (16+(8*9)));
108 alpha_stq(code, alpha_r11, alpha_fp, (16+(8*10)));
109 alpha_stq(code, alpha_r12, alpha_fp, (16+(8*11)));
110 alpha_stq(code, alpha_r13, alpha_fp, (16+(8*12)));
111 alpha_stq(code, alpha_r14, alpha_fp, (16+(8*13)));
112 alpha_stq(code, alpha_r22, alpha_fp, (16+(8*14)));
113 alpha_stq(code, alpha_r23, alpha_fp, (16+(8*15)));
114 alpha_stq(code, alpha_r24, alpha_fp, (16+(8*16)));
115 alpha_stq(code, alpha_r25, alpha_fp, (16+(8*17)));
116 alpha_stq(code, alpha_r26, alpha_fp, (16+(8*18)));
117 alpha_stq(code, alpha_r27, alpha_fp, (16+(8*19)));
118 alpha_stq(code, alpha_r28, alpha_fp, (16+(8*20)));
119 alpha_stq(code, alpha_r29, alpha_fp, (16+(8*21)));
121 /* Load regs from ctx */
123 alpha_ldq(code, alpha_r1, alpha_a0,
124 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r1]));
125 alpha_ldq(code, alpha_r2, alpha_a0,
126 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r2]));
127 alpha_ldq(code, alpha_r3, alpha_a0,
128 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r3]));
129 alpha_ldq(code, alpha_r4, alpha_a0,
130 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r4]));
131 alpha_ldq(code, alpha_r5, alpha_a0,
132 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r5]));
133 alpha_ldq(code, alpha_r6, alpha_a0,
134 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r6]));
135 alpha_ldq(code, alpha_r7, alpha_a0,
136 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r7]));
137 alpha_ldq(code, alpha_r8, alpha_a0,
138 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r8]));
139 alpha_ldq(code, alpha_r9, alpha_a0,
140 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r9]));
141 alpha_ldq(code, alpha_r10, alpha_a0,
142 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r10]));
143 alpha_ldq(code, alpha_r11, alpha_a0,
144 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r11]));
145 alpha_ldq(code, alpha_r12, alpha_a0,
146 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r12]));
147 alpha_ldq(code, alpha_r13, alpha_a0,
148 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r13]));
149 alpha_ldq(code, alpha_r14, alpha_a0,
150 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r14]));
151 alpha_ldq(code, alpha_r15, alpha_a0,
152 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r15]));
153 alpha_ldq(code, alpha_r22, alpha_a0,
154 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r22]));
155 alpha_ldq(code, alpha_r23, alpha_a0,
156 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r23]));
157 alpha_ldq(code, alpha_r24, alpha_a0,
158 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r24]));
159 alpha_ldq(code, alpha_r25, alpha_a0,
160 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r25]));
161 alpha_ldq(code, alpha_r26, alpha_a0,
162 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r26]));
163 alpha_ldq(code, alpha_r27, alpha_a0,
164 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r27]));
165 alpha_ldq(code, alpha_r28, alpha_a0,
166 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r28]));
167 alpha_ldq(code, alpha_r29, alpha_a0,
168 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r29]));
170 alpha_mov1(code, alpha_a1, alpha_pv);
172 /* call the handler */
173 alpha_jsr(code, alpha_ra, alpha_pv, 0);
175 /* restore saved regs */
176 alpha_ldq(code, alpha_r1, alpha_sp, (16+(8*0)));
177 alpha_ldq(code, alpha_r2, alpha_sp, (16+(8*1)));
178 alpha_ldq(code, alpha_r3, alpha_sp, (16+(8*2)));
179 alpha_ldq(code, alpha_r4, alpha_sp, (16+(8*3)));
180 alpha_ldq(code, alpha_r5, alpha_sp, (16+(8*4)));
181 alpha_ldq(code, alpha_r6, alpha_sp, (16+(8*5)));
182 alpha_ldq(code, alpha_r7, alpha_sp, (16+(8*6)));
183 alpha_ldq(code, alpha_r8, alpha_sp, (16+(8*7)));
184 alpha_ldq(code, alpha_r9, alpha_sp, (16+(8*8)));
185 alpha_ldq(code, alpha_r10, alpha_sp, (16+(8*9)));
186 alpha_ldq(code, alpha_r11, alpha_sp, (16+(8*10)));
187 alpha_ldq(code, alpha_r12, alpha_sp, (16+(8*11)));
188 alpha_ldq(code, alpha_r13, alpha_sp, (16+(8*12)));
189 alpha_ldq(code, alpha_r14, alpha_sp, (16+(8*13)));
190 alpha_ldq(code, alpha_r22, alpha_sp, (16+(8*14)));
191 alpha_ldq(code, alpha_r23, alpha_sp, (16+(8*15)));
192 alpha_ldq(code, alpha_r24, alpha_sp, (16+(8*16)));
193 alpha_ldq(code, alpha_r25, alpha_sp, (16+(8*17)));
194 alpha_ldq(code, alpha_r26, alpha_sp, (16+(8*18)));
195 alpha_ldq(code, alpha_r27, alpha_sp, (16+(8*19)));
196 alpha_ldq(code, alpha_r28, alpha_sp, (16+(8*20)));
197 alpha_ldq(code, alpha_r29, alpha_sp, (16+(8*21)));
199 alpha_ldq(code, alpha_ra, alpha_sp, 0);
200 alpha_ldq(code, alpha_fp, alpha_sp, 8);
201 alpha_lda(code, alpha_sp, alpha_sp, (8*25)); // Save 22 regs + RA, FP
203 alpha_ret(code, alpha_ra, 1);
205 inited = TRUE;
207 g_assert (( ((char *)code) - (char *)start_code) < 128 * 4);
209 return start_code;
212 /*========================= End of Function ========================*/
214 /*------------------------------------------------------------------*/
215 /* */
216 /* Name - arch_get_throw_exception */
217 /* */
218 /* Function - Return a function pointer which can be used to */
219 /* raise exceptions. The returned function has the */
220 /* following signature: */
221 /* void (*func) (MonoException *exc); */
222 /* */
223 /*------------------------------------------------------------------*/
225 static void throw_exception(MonoException *exc, unsigned long RA,
226 unsigned long *SP, unsigned long rethrow)
228 static void (*restore_context) (MonoContext *);
229 MonoContext ctx;
230 unsigned long *LSP = SP - 24;
232 //g_print("ALPHA: throw_exception - Exc: %p, RA: %0lX, SP: %p\n",
233 // exc, RA, SP);
235 if (!restore_context)
236 restore_context = mono_arch_get_restore_context ();
238 // Save stored regs into context
239 ctx.uc_mcontext.sc_regs[alpha_r0] = LSP[0];
240 ctx.uc_mcontext.sc_regs[alpha_r1] = LSP[1];
241 ctx.uc_mcontext.sc_regs[alpha_r2] = LSP[2];
242 ctx.uc_mcontext.sc_regs[alpha_r3] = LSP[3];
243 ctx.uc_mcontext.sc_regs[alpha_r4] = LSP[4];
244 ctx.uc_mcontext.sc_regs[alpha_r5] = LSP[5];
245 ctx.uc_mcontext.sc_regs[alpha_r6] = LSP[6];
246 ctx.uc_mcontext.sc_regs[alpha_r7] = LSP[7];
247 ctx.uc_mcontext.sc_regs[alpha_r8] = LSP[8];
248 ctx.uc_mcontext.sc_regs[alpha_r9] = LSP[9];
249 ctx.uc_mcontext.sc_regs[alpha_r10] = LSP[10];
250 ctx.uc_mcontext.sc_regs[alpha_r11] = LSP[11];
251 ctx.uc_mcontext.sc_regs[alpha_r12] = LSP[12];
252 ctx.uc_mcontext.sc_regs[alpha_r13] = LSP[13];
253 ctx.uc_mcontext.sc_regs[alpha_r14] = LSP[14];
254 ctx.uc_mcontext.sc_regs[alpha_r15] = LSP[15];
255 ctx.uc_mcontext.sc_regs[alpha_r22] = LSP[16];
256 ctx.uc_mcontext.sc_regs[alpha_r23] = LSP[17];
257 ctx.uc_mcontext.sc_regs[alpha_r24] = LSP[18];
258 ctx.uc_mcontext.sc_regs[alpha_r25] = LSP[19];
259 ctx.uc_mcontext.sc_regs[alpha_r26] = LSP[20];
260 ctx.uc_mcontext.sc_regs[alpha_r27] = LSP[21];
261 ctx.uc_mcontext.sc_regs[alpha_r28] = LSP[22];
262 ctx.uc_mcontext.sc_regs[alpha_r29] = LSP[23];
264 ctx.uc_mcontext.sc_regs[alpha_r30] = (unsigned long)SP;
265 ctx.uc_mcontext.sc_pc = RA;
267 if (mono_object_isinst (exc, mono_defaults.exception_class))
269 MonoException *mono_ex = (MonoException*)exc;
270 if (!rethrow)
271 mono_ex->stack_trace = NULL;
274 mono_handle_exception (&ctx, exc, (gpointer)RA, FALSE);
276 restore_context(&ctx);
278 g_assert_not_reached ();
282 ** This trampoline code is called from the code as action on
283 ** throw opcode. It should save all necessary regs somethere and
284 ** call the C function to do the rest.
285 ** For Alpha trampoline code should allocate space on stack and
286 ** save all registers into it. Then call "throw_exception"
287 ** function with "exc" info and saved registers. The "throw_exception"
288 ** should handle the rest. The "throw_exception" has signature
289 ** void (*throw_exception)(MonoException *, long PC, long SP)
290 ** The stack layout is:
291 ** R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, R13, R14,
292 ** R15, R22, R23, R24, R25, R26, R27, R28, R29
297 static gpointer
298 get_throw_trampoline (gboolean rethrow)
300 guint8 *start_code;
301 unsigned int *code;
303 start_code = mono_global_codeman_reserve (46*4);
305 code = (unsigned int *)start_code;
307 /* Exception is in a0 already */
308 alpha_mov1(code, alpha_ra, alpha_a1); // Return address
309 alpha_mov1(code, alpha_sp, alpha_a2); // Stack pointer
311 if (rethrow)
312 alpha_lda(code, alpha_a3, alpha_zero, 1);
313 else
314 alpha_mov1(code, alpha_zero, alpha_a3);
316 alpha_lda(code, alpha_sp, alpha_sp, -(24*8)); // Allocate stack for regs
318 alpha_stq(code, alpha_r0, alpha_sp, 0*8);
319 alpha_stq(code, alpha_r1, alpha_sp, 1*8);
320 alpha_stq(code, alpha_r2, alpha_sp, 2*8);
321 alpha_stq(code, alpha_r3, alpha_sp, 3*8);
322 alpha_stq(code, alpha_r4, alpha_sp, 4*8);
323 alpha_stq(code, alpha_r5, alpha_sp, 5*8);
324 alpha_stq(code, alpha_r6, alpha_sp, 6*8);
325 alpha_stq(code, alpha_r7, alpha_sp, 7*8);
326 alpha_stq(code, alpha_r8, alpha_sp, 8*8);
327 alpha_stq(code, alpha_r9, alpha_sp, 9*8);
328 alpha_stq(code, alpha_r10, alpha_sp, 10*8);
329 alpha_stq(code, alpha_r11, alpha_sp, 11*8);
330 alpha_stq(code, alpha_r12, alpha_sp, 12*8);
331 alpha_stq(code, alpha_r13, alpha_sp, 13*8);
332 alpha_stq(code, alpha_r14, alpha_sp, 14*8);
333 alpha_stq(code, alpha_r15, alpha_sp, 15*8);
334 alpha_stq(code, alpha_r22, alpha_sp, 16*8);
335 alpha_stq(code, alpha_r23, alpha_sp, 17*8);
336 alpha_stq(code, alpha_r24, alpha_sp, 18*8);
337 alpha_stq(code, alpha_r25, alpha_sp, 19*8);
338 alpha_stq(code, alpha_r26, alpha_sp, 20*8);
339 alpha_stq(code, alpha_r27, alpha_sp, 21*8);
340 alpha_stq(code, alpha_r28, alpha_sp, 22*8);
341 alpha_stq(code, alpha_r29, alpha_sp, 23*8);
343 alpha_mov1(code, alpha_zero, alpha_pv);
344 alpha_lda(code, alpha_r1, alpha_zero,
345 ((unsigned long)throw_exception)&0xFFFF);
346 alpha_lda(code, alpha_r2, alpha_zero,
347 (((unsigned long)throw_exception) >> 16)&0xFFFF);
348 alpha_lda(code, alpha_r3, alpha_zero,
349 (((unsigned long)throw_exception) >> 32)&0xFFFF);
350 alpha_lda(code, alpha_r4, alpha_zero,
351 (((unsigned long)throw_exception) >> 48)&0xFFFF);
352 alpha_zapnot_(code, alpha_r1, 0x3, alpha_r1);
353 alpha_bis(code, alpha_r1, alpha_pv, alpha_pv);
355 alpha_zapnot_(code, alpha_r2, 0x3, alpha_r2);
356 alpha_sll_(code, alpha_r2, 16, alpha_r2);
357 alpha_bis(code, alpha_r2, alpha_pv, alpha_pv);
359 alpha_zapnot_(code, alpha_r3, 0x3, alpha_r3);
360 alpha_sll_(code, alpha_r3, 32, alpha_r3);
361 alpha_bis(code, alpha_r3, alpha_pv, alpha_pv);
363 alpha_zapnot_(code, alpha_r4, 0x3, alpha_r4);
364 alpha_sll_(code, alpha_r4, 48, alpha_r4);
365 alpha_bis(code, alpha_r4, alpha_pv, alpha_pv); // pv - handle_exception addr
367 alpha_jmp(code, alpha_zero, alpha_pv, 0);
369 // alpha_break(code);
371 g_assert (( ((char *)code) - (char *)start_code) < 46 * 4);
373 return start_code;
377 * mono_arch_get_throw_exception:
379 * Returns a function pointer which can be used to raise
380 * exceptions. The returned function has the following
381 * signature: void (*func) (MonoException *exc);
384 gpointer
385 mono_arch_get_throw_exception (void)
387 static guint8* start;
388 static gboolean inited = FALSE;
390 ALPHA_DEBUG("mono_arch_get_throw_exception");
392 if (inited)
393 return start;
395 start = get_throw_trampoline (FALSE);
397 inited = TRUE;
399 return start;
401 /*========================= End of Function ========================*/
405 * mono_arch_get_throw_corlib_exception:
407 * Returns a function pointer which can be used to raise
408 * corlib exceptions. The returned function has the following
409 * signature: void (*func) (guint32 ex_token, guint32 offset);
410 * Here, offset is the offset which needs to be substracted from the caller IP
411 * to get the IP of the throw. Passing the offset has the advantage that it
412 * needs no relocations in the caller.
414 gpointer
415 mono_arch_get_throw_corlib_exception (void)
417 static guint8* start;
418 static gboolean inited = FALSE;
419 unsigned int *code;
420 guint64 throw_ex;
422 ALPHA_DEBUG("mono_arch_get_throw_corlib_exception");
424 if (inited)
425 return start;
427 start = mono_global_codeman_reserve (512);
429 code = (unsigned int *)start;
430 // Logic
431 // Expect exception token as parameter
432 // call mono_exception_from_token(void *, uint32 token)
433 // Get result and call "throw_ex" (got from mono_arch_get_throw_exception)
434 // Throw exception
436 // The trampoline code will be called with PV set
437 // so expect correct ABI handling
439 //alpha_ldah(code, alpha_gp, alpha_pv, 0);
440 //alpha_lda(code, alpha_gp, alpha_gp, 0);
441 alpha_lda(code, alpha_sp, alpha_sp, -(8*4));
443 // Save caller GP
444 alpha_stq(code, alpha_gp, alpha_sp, 24);
446 /* store call convention parameters on stack */
447 alpha_stq( code, alpha_ra, alpha_sp, 0 ); // ra
448 alpha_stq( code, alpha_fp, alpha_sp, 8 ); // fp
450 /* set the frame pointer */
451 alpha_mov1(code, alpha_sp, alpha_fp );
453 // Store throw_ip offset
454 alpha_stq(code, alpha_a1, alpha_fp, 16);
456 // Prepare to call "mono_exception_from_token (MonoImage *image, guint32 token)"
457 // Move token to a1 reg
458 alpha_mov1(code, alpha_a0, alpha_a1);
460 alpha_mov1(code, alpha_zero, alpha_a0);
461 alpha_lda(code, alpha_r1, alpha_zero,
462 ((unsigned long)mono_defaults.exception_class->image)&0xFFFF);
463 alpha_lda(code, alpha_r2, alpha_zero,
464 (((unsigned long)mono_defaults.exception_class->image) >> 16)&0xFFFF);
465 alpha_lda(code, alpha_r3, alpha_zero,
466 (((unsigned long)mono_defaults.exception_class->image) >> 32)&0xFFFF);
467 alpha_lda(code, alpha_r4, alpha_zero,
468 (((unsigned long)mono_defaults.exception_class->image) >> 48)&0xFFFF);
469 alpha_zapnot_(code, alpha_r1, 0x3, alpha_r1);
470 alpha_bis(code, alpha_r1, alpha_a0, alpha_a0);
472 alpha_zapnot_(code, alpha_r2, 0x3, alpha_r2);
473 alpha_sll_(code, alpha_r2, 16, alpha_r2);
474 alpha_bis(code, alpha_r2, alpha_a0, alpha_a0);
476 alpha_zapnot_(code, alpha_r3, 0x3, alpha_r3);
477 alpha_sll_(code, alpha_r3, 32, alpha_r3);
478 alpha_bis(code, alpha_r3, alpha_a0, alpha_a0);
480 alpha_zapnot_(code, alpha_r4, 0x3, alpha_r4);
481 alpha_sll_(code, alpha_r4, 48, alpha_r4);
482 alpha_bis(code, alpha_r4, alpha_a0, alpha_a0); // a0 - mono_defaults.exception_class->image
484 alpha_mov1(code, alpha_zero, alpha_pv);
485 alpha_lda(code, alpha_r1, alpha_zero,
486 ((unsigned long)mono_exception_from_token)&0xFFFF);
487 alpha_lda(code, alpha_r2, alpha_zero,
488 (((unsigned long)mono_exception_from_token) >> 16)&0xFFFF);
489 alpha_lda(code, alpha_r3, alpha_zero,
490 (((unsigned long)mono_exception_from_token) >> 32)&0xFFFF);
491 alpha_lda(code, alpha_r4, alpha_zero,
492 (((unsigned long)mono_exception_from_token) >> 48)&0xFFFF);
493 alpha_zapnot_(code, alpha_r1, 0x3, alpha_r1);
494 alpha_bis(code, alpha_r1, alpha_pv, alpha_pv);
496 alpha_zapnot_(code, alpha_r2, 0x3, alpha_r2);
497 alpha_sll_(code, alpha_r2, 16, alpha_r2);
498 alpha_bis(code, alpha_r2, alpha_pv, alpha_pv);
500 alpha_zapnot_(code, alpha_r3, 0x3, alpha_r3);
501 alpha_sll_(code, alpha_r3, 32, alpha_r3);
502 alpha_bis(code, alpha_r3, alpha_pv, alpha_pv);
504 alpha_zapnot_(code, alpha_r4, 0x3, alpha_r4);
505 alpha_sll_(code, alpha_r4, 48, alpha_r4);
506 alpha_bis(code, alpha_r4, alpha_pv, alpha_pv); // pv - mono_exception_from_token addr
508 alpha_jsr(code, alpha_ra, alpha_pv, 0);
510 // R0 holds pointer to initialised exception object
512 throw_ex = (guint64)mono_arch_get_throw_exception ();
514 alpha_mov1(code, alpha_r0, alpha_a0);
516 // Calc return address
517 alpha_mov1(code, alpha_fp, alpha_sp);
518 alpha_ldq(code, alpha_ra, alpha_sp, 0);
519 alpha_ldq(code, alpha_fp, alpha_sp, 8);
520 alpha_ldq(code, alpha_a1, alpha_sp, 16);
521 alpha_addq(code, alpha_ra, alpha_a1, alpha_ra);
522 alpha_ldq(code, alpha_gp, alpha_sp, 24);
524 // Modify stack to point to exception caller
525 alpha_lda(code, alpha_sp, alpha_sp, (8*4));
527 alpha_mov1(code, alpha_zero, alpha_pv);
528 alpha_lda(code, alpha_r1, alpha_zero,
529 ((unsigned long)throw_ex)&0xFFFF);
530 alpha_lda(code, alpha_r2, alpha_zero,
531 (((unsigned long)throw_ex) >> 16)&0xFFFF);
532 alpha_lda(code, alpha_r3, alpha_zero,
533 (((unsigned long)throw_ex) >> 32)&0xFFFF);
534 alpha_lda(code, alpha_r4, alpha_zero,
535 (((unsigned long)throw_ex) >> 48)&0xFFFF);
536 alpha_zapnot_(code, alpha_r1, 0x3, alpha_r1);
537 alpha_bis(code, alpha_r1, alpha_pv, alpha_pv);
539 alpha_zapnot_(code, alpha_r2, 0x3, alpha_r2);
540 alpha_sll_(code, alpha_r2, 16, alpha_r2);
541 alpha_bis(code, alpha_r2, alpha_pv, alpha_pv);
543 alpha_zapnot_(code, alpha_r3, 0x3, alpha_r3);
544 alpha_sll_(code, alpha_r3, 32, alpha_r3);
545 alpha_bis(code, alpha_r3, alpha_pv, alpha_pv);
547 alpha_zapnot_(code, alpha_r4, 0x3, alpha_r4);
548 alpha_sll_(code, alpha_r4, 48, alpha_r4);
549 alpha_bis(code, alpha_r4, alpha_pv, alpha_pv); // pv - handle_exception addr
551 alpha_jmp(code, alpha_zero, alpha_pv, 0);
553 g_assert (((char *)code - (char *)start) < 512);
555 inited = TRUE;
557 return start;
560 /*------------------------------------------------------------------*/
561 /* */
562 /* Name - mono_arch_handle_exception */
563 /* */
564 /* Function - Handle an exception raised by the JIT code. */
565 /* */
566 /* Parameters - ctx - Saved processor state */
567 /* obj - The exception object */
568 /* test_only - Only test if the exception is caught, */
569 /* but don't call handlers */
570 /* */
571 /*------------------------------------------------------------------*/
573 gboolean
574 mono_arch_handle_exception (void *uc, gpointer obj, gboolean test_only)
576 ALPHA_DEBUG("mono_arch_handle_exception");
578 return mono_handle_exception (uc, obj, mono_arch_ip_from_context(uc),
579 test_only);
582 /*========================= End of Function ========================*/
584 /*------------------------------------------------------------------*/
585 /* */
586 /* Name - mono_arch_get_restore_context */
587 /* */
588 /* Function - Return the address of the routine that will rest- */
589 /* ore the context. */
590 /* */
591 /*------------------------------------------------------------------*/
593 gpointer
594 mono_arch_get_restore_context ()
596 static guint8 *start_code = NULL;
597 static gboolean inited = FALSE;
598 unsigned int *code;
600 ALPHA_DEBUG("mono_arch_get_restore_context");
602 if (inited)
603 return start_code;
605 /* restore_contect (MonoContext *ctx) */
607 start_code = mono_global_codeman_reserve (30*4);
609 code = (unsigned int *)start_code;
611 alpha_ldq(code, alpha_r0, alpha_a0,
612 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r0]));
613 alpha_ldq(code, alpha_r1, alpha_a0,
614 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r1]));
615 alpha_ldq(code, alpha_r2, alpha_a0,
616 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r2]));
617 alpha_ldq(code, alpha_r3, alpha_a0,
618 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r3]));
619 alpha_ldq(code, alpha_r4, alpha_a0,
620 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r4]));
621 alpha_ldq(code, alpha_r5, alpha_a0,
622 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r5]));
623 alpha_ldq(code, alpha_r6, alpha_a0,
624 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r6]));
625 alpha_ldq(code, alpha_r7, alpha_a0,
626 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r7]));
627 alpha_ldq(code, alpha_r8, alpha_a0,
628 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r8]));
629 alpha_ldq(code, alpha_r9, alpha_a0,
630 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r9]));
631 alpha_ldq(code, alpha_r10, alpha_a0,
632 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r10]));
633 alpha_ldq(code, alpha_r11, alpha_a0,
634 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r11]));
635 alpha_ldq(code, alpha_r12, alpha_a0,
636 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r12]));
637 alpha_ldq(code, alpha_r13, alpha_a0,
638 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r13]));
639 alpha_ldq(code, alpha_r14, alpha_a0,
640 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r14]));
641 alpha_ldq(code, alpha_r15, alpha_a0,
642 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r15]));
643 alpha_ldq(code, alpha_r22, alpha_a0,
644 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r22]));
645 alpha_ldq(code, alpha_r23, alpha_a0,
646 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r23]));
647 alpha_ldq(code, alpha_r24, alpha_a0,
648 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r24]));
649 alpha_ldq(code, alpha_r25, alpha_a0,
650 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r25]));
651 alpha_ldq(code, alpha_r26, alpha_a0,
652 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r26]));
653 alpha_ldq(code, alpha_r27, alpha_a0,
654 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r27]));
655 alpha_ldq(code, alpha_r28, alpha_a0,
656 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r28]));
657 alpha_ldq(code, alpha_r29, alpha_a0,
658 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r29]));
659 alpha_ldq(code, alpha_r30, alpha_a0,
660 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_regs[alpha_r30]));
662 alpha_ldq(code, alpha_ra, alpha_a0,
663 G_STRUCT_OFFSET(MonoContext, uc_mcontext.sc_pc));
665 alpha_ret(code, alpha_ra, 1);
667 inited = TRUE;
669 return start_code;
672 /*========================= End of Function ========================*/
674 /*------------------------------------------------------------------*/
675 /* */
676 /* Name - mono_arch_ip_from_context */
677 /* */
678 /* Function - Return the instruction pointer from the context. */
679 /* */
680 /* Parameters - sigctx - Saved processor state */
681 /* */
682 /*------------------------------------------------------------------*/
684 gpointer
685 mono_arch_ip_from_context (void *sigctx)
687 gpointer ip;
688 ALPHA_DEBUG("mono_arch_ip_from_context");
690 ip = (gpointer) MONO_CONTEXT_GET_IP(((MonoContext *) sigctx));
692 printf("ip_from_context = %p\n", ip);
694 return ip;
698 /*========================= End of Function ========================*/
700 /*------------------------------------------------------------------*/
701 /* */
702 /* Name - arch_get_rethrow_exception */
703 /* */
704 /* Function - Return a function pointer which can be used to */
705 /* raise exceptions. The returned function has the */
706 /* following signature: */
707 /* void (*func) (MonoException *exc); */
708 /* */
709 /*------------------------------------------------------------------*/
711 gpointer
712 mono_arch_get_rethrow_exception (void)
714 static guint8 *start;
715 static int inited = 0;
717 ALPHA_DEBUG("mono_arch_get_rethrow_exception");
719 if (inited)
720 return start;
722 start = get_throw_trampoline (TRUE);
724 inited = 1;
726 return start;
729 /*========================= End of Function ========================*/
731 /*------------------------------------------------------------------*/
732 /* */
733 /* Name - arch_get_throw_exception_by_name */
734 /* */
735 /* Function - Return a function pointer which can be used to */
736 /* raise corlib exceptions. The return function has */
737 /* the following signature: */
738 /* void (*func) (char *exc_name); */
739 /* */
740 /*------------------------------------------------------------------*/
742 gpointer
743 mono_arch_get_throw_exception_by_name (void)
745 static guint8 *start;
746 static int inited = 0;
747 unsigned int *code;
749 if (inited)
750 return start;
752 start = mono_global_codeman_reserve (SZ_THROW);
753 // get_throw_exception_generic (start, SZ_THROW, TRUE, FALSE);
754 inited = 1;
756 code = (unsigned int *)start;
758 alpha_call_pal(code, 0x80);
760 return start;
762 /*========================= End of Function ========================*/
764 /*------------------------------------------------------------------*/
765 /* */
766 /* Name - mono_arch_find_jit_info */
767 /* */
768 /* Function - This function is used to gather informatoin from */
769 /* @ctx. It returns the MonoJitInfo of the corres- */
770 /* ponding function, unwinds one stack frame and */
771 /* stores the resulting context into @new_ctx. It */
772 /* also stores a string describing the stack location*/
773 /* into @trace (if not NULL), and modifies the @lmf */
774 /* if necessary. @native_offset returns the IP off- */
775 /* set from the start of the function or -1 if that */
776 /* information is not available. */
777 /* */
778 /*------------------------------------------------------------------*/
780 MonoJitInfo *
781 mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
782 MonoJitInfo *res, MonoJitInfo *prev_ji,
783 MonoContext *ctx,
784 MonoContext *new_ctx, MonoLMF **lmf,
785 mgreg_t **save_locations,
786 gboolean *managed)
788 MonoJitInfo *ji;
789 int i;
790 gpointer ip = MONO_CONTEXT_GET_IP (ctx);
792 ALPHA_DEBUG("mono_arch_find_jit_info");
794 /* Avoid costly table lookup during stack overflow */
795 if (prev_ji &&
796 (ip > prev_ji->code_start &&
797 ((guint8*)ip < ((guint8*)prev_ji->code_start) + prev_ji->code_size)))
798 ji = prev_ji;
799 else
800 ji = mini_jit_info_table_find (domain, ip, NULL);
802 if (managed)
803 *managed = FALSE;
805 if (ji != NULL)
807 int offset;
808 gboolean omit_fp = 0; //(ji->used_regs & (1 << 31)) > 0;
810 *new_ctx = *ctx;
812 if (managed)
813 if (!ji->method->wrapper_type)
814 *managed = TRUE;
817 * Some managed methods like pinvoke wrappers might have save_lmf set.
818 * In this case, register save/restore code is not generated by the
819 * JIT, so we have to restore callee saved registers from the lmf.
822 if (ji->method->save_lmf)
825 * We only need to do this if the exception was raised in managed
826 * code, since otherwise the lmf was already popped of the stack.
828 if (*lmf && ((*lmf) != jit_tls->first_lmf) &&
829 (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp))
831 new_ctx->uc_mcontext.sc_regs[alpha_fp] = (*lmf)->ebp;
832 new_ctx->uc_mcontext.sc_regs[alpha_sp] = (*lmf)->rsp;
833 new_ctx->uc_mcontext.sc_regs[alpha_gp] = (*lmf)->rgp;
836 new_ctx->rbp = (*lmf)->ebp;
837 new_ctx->rbx = (*lmf)->rbx;
838 new_ctx->rsp = (*lmf)->rsp;
839 new_ctx->r12 = (*lmf)->r12;
840 new_ctx->r13 = (*lmf)->r13;
841 new_ctx->r14 = (*lmf)->r14;
842 new_ctx->r15 = (*lmf)->r15;
846 else
848 offset = omit_fp ? 0 : 2;
850 /* restore caller saved registers */
851 for (i = 0; i < MONO_MAX_IREGS; i++)
852 if (ALPHA_IS_CALLEE_SAVED_REG(i) &&
853 (ji->used_regs & (1 << i)))
856 guint64 reg;
857 #if 0
858 if (omit_fp)
860 reg = *((guint64*)ctx->rsp + offset);
861 offset++;
863 else
865 //reg = *((guint64 *)ctx->SC_EBP + offset);
866 //offset--;
869 switch (i)
871 case AMD64_RBX:
872 new_ctx->rbx = reg;
873 break;
874 case AMD64_R12:
875 new_ctx->r12 = reg;
876 break;
877 case AMD64_R13:
878 new_ctx->r13 = reg;
879 break;
880 case AMD64_R14:
881 new_ctx->r14 = reg;
882 break;
883 case AMD64_R15:
884 new_ctx->r15 = reg;
885 break;
886 case AMD64_RBP:
887 new_ctx->rbp = reg;
888 break;
889 default:
890 g_assert_not_reached ();
892 #endif
896 if (*lmf && ((*lmf) != jit_tls->first_lmf) &&
897 (MONO_CONTEXT_GET_SP (ctx) >= (gpointer)(*lmf)->rsp)) {
898 /* remove any unused lmf */
899 *lmf = (*lmf)->previous_lmf;
902 #if 0
903 if (omit_fp)
905 /* Pop frame */
906 new_ctx->rsp += (ji->used_regs >> 16) & (0x7fff);
907 new_ctx->SC_EIP = *((guint64 *)new_ctx->rsp) - 1;
908 /* Pop return address */
909 new_ctx->rsp += 8;
911 else
912 #endif
915 /* Pop FP and the RA */
916 /* Some how we should find size of frame. One way:
917 read 3rd instruction (alpha_lda(alpha_sp, alpha_sp, -stack_size ))
918 and extract "stack_size" from there
919 read 4th and 5th insts to get offsets to saved RA & FP
921 unsigned int *code = (unsigned int *)ji->code_start;
922 short stack_size = -((short)(code[2] & 0xFFFF));
923 short ra_off = code[3] & 0xFFFF;
924 short fp_off = code[4] & 0xFFFF;
926 /* Restore stack - value of FP reg + stack_size */
927 new_ctx->uc_mcontext.sc_regs[alpha_sp] =
928 ctx->uc_mcontext.sc_regs[alpha_r15] + stack_size;
930 /* we substract 1, so that the IP points into the call instruction */
931 /* restore PC - @FP + 0 */
932 new_ctx->uc_mcontext.sc_pc =
933 *((guint64 *)(ctx->uc_mcontext.sc_regs[alpha_r15] + ra_off));
935 /* Restore FP reg - @FP + 8 */
936 new_ctx->uc_mcontext.sc_regs[alpha_r15] =
937 *((guint64 *)(ctx->uc_mcontext.sc_regs[alpha_r15] + fp_off));
939 /* Restore GP - read two insts that restore GP from sc_pc and */
940 /* do the same. Use sc_pc as RA */
941 code = (unsigned int *)new_ctx->uc_mcontext.sc_pc;
942 if ((code[0] & 0xFFFF0000) == 0x27ba0000 && // ldah gp,high_off(ra)
943 (code[1] & 0xFFFF0000) == 0x23bd0000) // lda gp,low_off(gp)
945 short high_off = (short)(code[0] & 0xFFFF);
946 short low_off = (short)(code[1] & 0xFFFF);
948 long rgp = new_ctx->uc_mcontext.sc_pc +
949 (65536 * high_off) + low_off;
951 new_ctx->uc_mcontext.sc_regs[alpha_gp] = rgp;
955 #if 0
956 /* Pop arguments off the stack */
957 // No poping args off stack on Alpha
958 // We use fixed place
960 MonoJitArgumentInfo *arg_info =
961 g_newa (MonoJitArgumentInfo,
962 mono_method_signature (ji->method)->param_count + 1);
964 guint32 stack_to_pop =
965 mono_arch_get_argument_info (mono_method_signature (ji->method),
966 mono_method_signature (ji->method)->param_count,
967 arg_info);
968 new_ctx->uc_mcontext.sc_regs[alpha_sp] += stack_to_pop;
970 #endif
971 return ji;
973 else if (*lmf)
975 // Unwind based on LMF info
976 if (!(*lmf)->method)
977 return (gpointer)-1;
979 if ((ji = mini_jit_info_table_find (domain, (gpointer)(*lmf)->eip, NULL))) {
980 } else {
981 memset (res, 0, MONO_SIZEOF_JIT_INFO);
982 res->method = (*lmf)->method;
985 new_ctx->uc_mcontext.sc_regs[alpha_fp] = (*lmf)->ebp;
986 new_ctx->uc_mcontext.sc_regs[alpha_sp] = (*lmf)->rsp;
987 new_ctx->uc_mcontext.sc_regs[alpha_gp] = (*lmf)->rgp;
988 new_ctx->uc_mcontext.sc_pc = (*lmf)->eip;
990 *lmf = (*lmf)->previous_lmf;
992 return ji ? ji : res;
995 return NULL;
998 /*========================= End of Function ========================*/