1 /* Startup code for ZPU
2 Copyright (C) 2005 Free Software Foundation, Inc.
4 This file is free software; you can redistribute it and/or modify it
5 under the terms of the GNU General Public License as published by the
6 Free Software Foundation; either version 2, or (at your option) any
9 In addition to the permissions in the GNU General Public License, the
10 Free Software Foundation gives you unlimited permission to link the
11 compiled version of this file with other programs, and to distribute
12 those programs without any restriction coming from the use of this
13 file. (The General Public License restrictions do apply in other
14 respects; for example, they cover modification of the file, and
15 distribution when not linked into another program.)
17 This file is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, 59 Temple Place - Suite 330,
25 Boston, MA 02111-1307, USA. */
31 ; .section ".fixed_vectors","ax"
32 ; KLUDGE!!! we remove the executable bit to avoid relaxation
33 .section ".fixed_vectors","a"
36 ; we need to align these code sections to 32 bytes, which
37 ; means we must not use any assembler instructions that are relaxed
48 im _memreg+4 ; save R1
50 im _memreg+8 ; save R2
94 ; destroy arguments on stack
101 ; poke the result into the right slot
122 ; create mask of lowest bit in A
131 add ; accumulate in C
138 ; shift A right 1 bit
153 ; intSp must be 0 when we jump to _premain
166 .globl _zpu_interrupt_vector
; Interrupt entry stub at the fixed vector address: immediately forwards
; to ___zpu_interrupt_vector, which is declared .weak elsewhere in this
; file so an application can override the default interrupt handler.
167 _zpu_interrupt_vector:
168 jmp ___zpu_interrupt_vector
178 /* instruction emulation code */
185 ; by not masking out bit 0, we cause a memory access error
186 ; on unaligned access
200 ; shift right addr&3 * 8
213 ; by not masking out bit 0, we cause a memory access error
214 ; on unaligned access
253 ; 0x80000000 will overflow when negated, so we need to mask
254 ; the result above with the compare positive to negative
264 ; handle case where we are comparing a negative number
265 ; and positive number. This can underflow. E.g. consider 0x80000000 < 0x1000
310 /* low: -1 if low bit dif is negative 0 otherwise: neg (not x&1 and (y&1))
311 x&1 y&1 neg (not x&1 and (y&1))
327 /* high: upper 31-bit diff is only wrong when diff is 0 and low=-1
328 high=x>>1 - y>>1 + low
333 low= neg(not 0 and 1) = 1111 (-1)
334 high=000+ neg(111) +low = 000 + 1001 + low = 1000
338 low=neg(not 1 and 0) = 0
339 high=111+neg(000) + low = 0111
358 ; if they are equal, then the last bit decides...
361 /* test if negative: result = flip(diff) & 1 */
366 ; destroy a&b which are on stack
446 ; handle signed value
452 not ; now we have an integer on the stack with the signed
453 ; bits in the right position
455 ; mask these bits with the signed bit.
466 ; stuff in the signed bits...
469 ; store result into correct stack slot
472 ; move up return value
486 ; store return address
492 pushsp ; flush internal stack
660 ; mask away destination
718 ; fetch boolean & neg mask
722 ; calc address & mask for branch
726 ; subtract 1 to find PC of branch instruction
773 ; fetch boolean & neg mask
777 ; calc address & mask for branch
781 ; find address of branch instruction
800 ; address of poppcrel
865 storesp 12 ; return address
867 pushsp ; this will flush the internal stack.
918 ; NB! this is not an EMULATE instruction. It is a varargs fn.
935 .byte (.LmoreMult-.Lbranch)&0x7f+0x80
950 .globl ___zpu_interrupt_vector
951 .weak ___zpu_interrupt_vector
953 ___zpu_interrupt_vector: