; HP-PA-1.1 __mpn_submul_1 -- Multiply a limb vector with a limb and
; subtract the result from a second limb vector.

; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.

; This file is part of the GNU MP Library.

; The GNU MP Library is free software; you can redistribute it and/or modify
; it under the terms of the GNU Library General Public License as published by
; the Free Software Foundation; either version 2 of the License, or (at your
; option) any later version.

; The GNU MP Library is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
; License for more details.

; You should have received a copy of the GNU Library General Public License
; along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
; the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
; MA 02111-1307, USA.
; INPUT PARAMETERS
; res_ptr	r26
; s1_ptr	r25
; size		r24
; s2_limb	r23
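
; For reference, a rough C sketch of what this routine computes (assuming
; GMP's mp_limb_t/mp_size_t types and 32-bit limbs):
;
;   mp_limb_t
;   __mpn_submul_1 (mp_limb_t *res_ptr, const mp_limb_t *s1_ptr,
;                   mp_size_t size, mp_limb_t s2_limb)
;   {
;     mp_limb_t borrow = 0;
;     for (mp_size_t i = 0; i < size; i++)
;       {
;         /* 32x32 -> 64 bit product, as xmpyu produces below.  */
;         unsigned long long prod = (unsigned long long) s1_ptr[i] * s2_limb;
;         mp_limb_t low = (mp_limb_t) prod + borrow;
;         borrow = (mp_limb_t) (prod >> 32) + (low < (mp_limb_t) prod);
;         mp_limb_t r = res_ptr[i];
;         res_ptr[i] = r - low;
;         borrow += (r < low);   /* borrow out of the subtraction */
;       }
;     return borrow;             /* limb to subtract from the next position */
;   }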

; This runs at 12 cycles/limb on a PA7000.  With the instructions used, it
; cannot run faster, because of data cache contention after a store.  On the
; PA7100 it runs at 11 cycles/limb, and that cannot be improved either, since
; only the xmpyu does not need the integer pipeline, so the only dual-issue
; pairing we get is addc+xmpyu.  Unrolling could gain a cycle/limb on the
; PA7100.

; There are some ideas described in mul_1.s that apply to this code too.

; It seems possible to make this run as fast as __mpn_addmul_1, if we use
;	sub,>>=	%r29,%r19,%r22
;	addi	1,%r28,%r28
; but that requires reworking the hairy software pipeline...
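; (sub,>>= nullifies the following instruction when %r29 >= %r19, i.e. when
; there is no borrow, so the addi would fold the borrow directly into the
; high product limb in %r28, much as __mpn_addmul_1 handles its carry.)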

	.code
	.export	__mpn_submul_1
__mpn_submul_1
	.proc
	.callinfo frame=64,no_calls
	.entry

	ldo	64(%r30),%r30		; allocate the 64-byte frame
	fldws,ma 4(%r25),%fr5		; fr5 = s1[0], bump s1_ptr
	stw	%r23,-16(%r30)		; move s2_limb ...
	addib,=	-1,%r24,L$just_one_limb
	fldws	-16(%r30),%fr4		; ... into fr4
	add	%r0,%r0,%r0		; clear carry
	xmpyu	%fr4,%fr5,%fr6		; s2_limb * s1[0]
	fldws,ma 4(%r25),%fr7		; fr7 = s1[1], bump s1_ptr
	fstds	%fr6,-16(%r30)		; spill the 64-bit product
	xmpyu	%fr4,%fr7,%fr8		; s2_limb * s1[1]
	ldw	-12(%r30),%r19		; least significant limb in product
	ldw	-16(%r30),%r28		; most significant limb in product

	fstds	%fr8,-16(%r30)
	addib,=	-1,%r24,L$end
	ldw	-12(%r30),%r1		; low limb of the second product

; Main loop
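; Each pass subtracts from the current res limb the low product limb plus
; the borrow accumulated so far (held in %r19).  The add that follows the
; sub recomputes the res limb only to leave the borrow in the carry bit,
; so that the addc can fold it into the subtrahend for the next limb.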
L$loop	ldws	0(%r26),%r29		; r29 = current res limb
	fldws,ma 4(%r25),%fr5		; load the next s1 limb
	sub	%r29,%r19,%r22		; subtract low limb + borrow
	add	%r22,%r19,%r0		; recover the borrow into the carry bit
	stws,ma	%r22,4(%r26)		; store difference, bump res_ptr
	addc	%r28,%r1,%r19		; next subtrahend = high + next low + borrow
	xmpyu	%fr4,%fr5,%fr6
	ldw	-16(%r30),%r28		; high limb of the last-stored product
	fstds	%fr6,-16(%r30)
	addc	%r0,%r28,%r28		; fold the carry into it
	addib,<> -1,%r24,L$loop
	ldw	-12(%r30),%r1		; low limb of the product just stored

L$end	ldw	0(%r26),%r29		; handle the last two res limbs
	sub	%r29,%r19,%r22
	add	%r22,%r19,%r0
	stws,ma	%r22,4(%r26)
	addc	%r28,%r1,%r19
	ldw	-16(%r30),%r28
	ldws	0(%r26),%r29
	addc	%r0,%r28,%r28
	sub	%r29,%r19,%r22
	add	%r22,%r19,%r0
	stws,ma	%r22,4(%r26)
	addc	%r0,%r28,%r28
	bv	0(%r2)			; return with the borrow limb in %r28
	ldo	-64(%r30),%r30		; deallocate the frame (delay slot)

L$just_one_limb
	xmpyu	%fr4,%fr5,%fr6		; the single product
	ldw	0(%r26),%r29
	fstds	%fr6,-16(%r30)
	ldw	-12(%r30),%r1		; low limb
	ldw	-16(%r30),%r28		; high limb
	sub	%r29,%r1,%r22
	add	%r22,%r1,%r0		; borrow into the carry bit
	stw	%r22,0(%r26)
	addc	%r0,%r28,%r28		; return high limb + borrow
	bv	0(%r2)
	ldo	-64(%r30),%r30

	.exit
	.procend