# glibc.git / sysdeps / alpha / addmul_1.s
 # Alpha 21064 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
 # the result to a second limb vector.

 # Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.

 # This file is part of the GNU MP Library.

 # The GNU MP Library is free software; you can redistribute it and/or modify
 # it under the terms of the GNU Library General Public License as published by
 # the Free Software Foundation; either version 2 of the License, or (at your
 # option) any later version.

 # The GNU MP Library is distributed in the hope that it will be useful, but
 # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 # or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
 # License for more details.

 # You should have received a copy of the GNU Library General Public License
 # along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
 # the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 # INPUT PARAMETERS
 # res_ptr	r16
 # s1_ptr	r17
 # size		r18
 # s2_limb	r19

 # This code runs at 42 cycles/limb on the 21064.

 # To improve performance for long multiplications, we would use
 # 'fetch' for S1 and 'fetch_m' for RES.  It's not obvious how to use
 # these instructions without slowing down the general code: 1. We can
 # only have two prefetches in operation at any time in the Alpha
 # architecture.  2. There will seldom be any special alignment
 # between RES_PTR and S1_PTR.  Maybe we can simply divide the current
 # loop into an inner and outer loop, having the inner loop handle
 # exactly one prefetch block?
40 .set noreorder
41 .set noat
42 .text
43 .align 3
44 .globl __mpn_addmul_1
45 .ent __mpn_addmul_1 2
46 __mpn_addmul_1:
47 .frame $30,0,$26
49 ldq $2,0($17) # $2 = s1_limb
50 addq $17,8,$17 # s1_ptr++
51 subq $18,1,$18 # size--
52 mulq $2,$19,$3 # $3 = prod_low
53 ldq $5,0($16) # $5 = *res_ptr
54 umulh $2,$19,$0 # $0 = prod_high
55 beq $18,Lend1 # jump if size was == 1
56 ldq $2,0($17) # $2 = s1_limb
57 addq $17,8,$17 # s1_ptr++
58 subq $18,1,$18 # size--
59 addq $5,$3,$3
60 cmpult $3,$5,$4
61 stq $3,0($16)
62 addq $16,8,$16 # res_ptr++
63 beq $18,Lend2 # jump if size was == 2
65 .align 3
66 Loop: mulq $2,$19,$3 # $3 = prod_low
67 ldq $5,0($16) # $5 = *res_ptr
68 addq $4,$0,$0 # cy_limb = cy_limb + 'cy'
69 subq $18,1,$18 # size--
70 umulh $2,$19,$4 # $4 = cy_limb
71 ldq $2,0($17) # $2 = s1_limb
72 addq $17,8,$17 # s1_ptr++
73 addq $3,$0,$3 # $3 = cy_limb + prod_low
74 cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low)
75 addq $5,$3,$3
76 cmpult $3,$5,$5
77 stq $3,0($16)
78 addq $16,8,$16 # res_ptr++
79 addq $5,$0,$0 # combine carries
80 bne $18,Loop
82 Lend2: mulq $2,$19,$3 # $3 = prod_low
83 ldq $5,0($16) # $5 = *res_ptr
84 addq $4,$0,$0 # cy_limb = cy_limb + 'cy'
85 umulh $2,$19,$4 # $4 = cy_limb
86 addq $3,$0,$3 # $3 = cy_limb + prod_low
87 cmpult $3,$0,$0 # $0 = carry from (cy_limb + prod_low)
88 addq $5,$3,$3
89 cmpult $3,$5,$5
90 stq $3,0($16)
91 addq $5,$0,$0 # combine carries
92 addq $4,$0,$0 # cy_limb = prod_high + cy
93 ret $31,($26),1
94 Lend1: addq $5,$3,$3
95 cmpult $3,$5,$5
96 stq $3,0($16)
97 addq $0,$5,$0
98 ret $31,($26),1
100 .end __mpn_addmul_1