dnl  ARM Neon mpn_lshift and mpn_rshift.

dnl  Contributed to the GNU project by Torbjörn Granlund.

dnl  Copyright 2013 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.
include(`../config.m4')
C            cycles/limb     cycles/limb     cycles/limb      good
C              aligned        unaligned       best seen      for cpu?
C Cortex-A15      1.5            1.5                            Y
C We read 64 bits at a time at 32-bit aligned addresses, and except for the
C first and last store, we write using 64-bit aligned addresses.  All shifting
C is done on 64-bit words in 'extension' registers.
C
C It should be possible to read also using 64-bit alignment, by manipulating
C the shift count for unaligned operands.  Not done, since it does not seem to
C matter for A9 or A15.
C
C This will not work in big-endian mode.
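C
C For reference, here is a sketch in C of the operation being computed (the
C name lshift_ref is illustrative, not a GMP entry point; limbs are 32 bits
C here and 1 <= cnt <= 31; rshift mirrors it in the other direction):
C
C     mp_limb_t
C     lshift_ref (mp_limb_t *rp, const mp_limb_t *ap, mp_size_t n, unsigned cnt)
C     {
C       unsigned tnc = 32 - cnt;
C       mp_limb_t retval = ap[n - 1] >> tnc;  /* bits shifted out at the top */
C       mp_size_t i;
C       for (i = n - 1; i > 0; i--)
C         rp[i] = (ap[i] << cnt) | (ap[i - 1] >> tnc);
C       rp[0] = ap[0] << cnt;
C       return retval;
C     }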
C TODO
C  * Try using 128-bit operations.  Note that Neon lacks pure 128-bit shifts,
C    which might make it tricky.
C  * Clean up and simplify.
C  * Consider sharing most of the code for lshift and rshift, since the
C    feed-in code, the loop, and most of the wind-down code are identical.
C  * Replace the basecase code with code using 'extension' registers.
C  * Optimise.  It is not clear that this loop insn permutation is optimal for
C    either A9 or A15.
C INPUT PARAMETERS
define(`rp',  `r0')
define(`ap',  `r1')
define(`n',   `r2')
define(`cnt', `r3')
define(`tnc', `r12')

ifdef(`OPERATION_lshift',`
        define(`func',`mpn_lshift')
')
ifdef(`OPERATION_rshift',`
        define(`func',`mpn_rshift')
')
MULFUNC_PROLOGUE(mpn_lshift mpn_rshift)

ASM_START()
        TEXT
        ALIGN(64)
PROLOGUE(func)
IFLSH(` mov     r12, n, lsl #2  ')
IFLSH(` add     rp, rp, r12     ')
IFLSH(` add     ap, ap, r12     ')
        cmp     n, #4                   C SIMD code n limit
        ble     L(base)
ifdef(`OPERATION_lshift',`
        vdup.32 d6, r3                  C left shift count is positive
        sub     r3, r3, #64             C right shift count is negative
        vdup.32 d7, r3
        mov     r12, #-8')              C lshift pointer update offset
ifdef(`OPERATION_rshift',`
        rsb     r3, r3, #0              C right shift count is negative
        vdup.32 d6, r3
        add     r3, r3, #64             C left shift count is positive
        vdup.32 d7, r3
        mov     r12, #8')               C rshift pointer update offset
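C
C The d6/d7 setup above relies on Neon vshl.u64 shifting right when the
C per-element count is negative: for lshift, d6 holds cnt and d7 holds
C cnt - 64; for rshift the counts are negated.  In C terms (a sketch, with
C w a 64-bit word holding two adjacent limbs):
C
C     uint64_t lo = w << cnt;           /* vshl.u64 ..., w, d6 */
C     uint64_t hi = w >> (64 - cnt);    /* vshl.u64 ..., w, d7 */
C
C The main loop combines the lo part of one word with the hi part of the
C next using vorr.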
IFLSH(` sub     ap, ap, #8      ')
        vld1.32 {d19}, [ap], r12        C load initial 2 limbs
        vshl.u64 d18, d19, d7           C retval
        tst     rp, #4                  C is rp 64-bit aligned already?
        beq     L(rp_aligned)           C yes, skip
IFLSH(` add     ap, ap, #4      ')      C move back ap pointer
IFRSH(` sub     ap, ap, #4      ')      C move back ap pointer
        vshl.u64 d4, d19, d6
        sub     n, n, #1                C first limb handled
IFLSH(` sub     rp, rp, #4      ')
        vst1.32 {d4[Y]}, [rp]IFRSH(!)   C store first limb, rp gets aligned
        vld1.32 {d19}, [ap], r12        C load ap[1] and ap[2]
L(rp_aligned):
IFLSH(` sub     rp, rp, #8      ')
        subs    n, n, #6
        blt     L(two_or_three_more)
        tst     n, #2
        beq     L(2)
L(1):   vld1.32 {d17}, [ap], r12
        vld1.32 {d16}, [ap], r12

L(2):   vld1.32 {d16}, [ap], r12
        vld1.32 {d17}, [ap], r12

L(top): vld1.32 {d16}, [ap], r12
        vst1.32 {d2}, [rp:64], r12
L(mid): vld1.32 {d17}, [ap], r12
        vst1.32 {d3}, [rp:64], r12

        vst1.32 {d2}, [rp:64], r12

L(evn): vorr    d2, d4, d1
        vshl.u64 d16, d17, d6
        vst1.32 {d2}, [rp:64], r12
C Load last 2 - 3 limbs, store last 4 - 5 limbs
L(two_or_three_more):
        tst     n, #1
        beq     L(l2)
L(l3):  vshl.u64 d5, d19, d6
        vld1.32 {d17}, [ap], r12
L(cj1): veor    d16, d16, d16
IFLSH(` add     ap, ap, #4      ')
        vld1.32 {d16[Y]}, [ap], r12
        vst1.32 {d3}, [rp:64], r12
        vst1.32 {d2}, [rp:64], r12
IFLSH(` add     rp, rp, #4      ')
        vst1.32 {d5[Y]}, [rp]
        vmov.32 r0, d18[X]
        bx      lr
L(l2):  vld1.32 {d16}, [ap], r12
        vshl.u64 d16, d16, d6
L(cj2): vst1.32 {d2}, [rp:64], r12
        vst1.32 {d16}, [rp]
        vmov.32 r0, d18[X]
        bx      lr
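C The basecase below handles small n with plain ARM code.  Each round of the
C lshift loop corresponds to this C sketch (r6 and r8 alternate so every limb
C is loaded only once; the rshift loop mirrors it with the shift directions
C and pointer updates reversed):
C
C     r8 = *--ap;  r7 |= r6 >> tnc;  *--rp = r7;  r7 = r6 << cnt;
C     r6 = *--ap;  r7 |= r8 >> tnc;  *--rp = r7;  r7 = r8 << cnt;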
L(base):
        push    {r4, r6, r7, r8}
ifdef(`OPERATION_lshift',`
L(ev):  ldr     r6, [ap, #-4]!
L(tp):  ldr     r8, [ap, #-4]!
        orr     r7, r7, r6, lsr tnc
L(md):  ldr     r6, [ap, #-4]!
        orr     r7, r7, r8, lsr tnc
L(ed):  orr     r7, r7, r6, lsr tnc
L(ed1): str     r7, [rp, #-4]
        mov     r0, r4, lsr tnc
')
ifdef(`OPERATION_rshift',`
L(ev):  ldr     r6, [ap, #4]!
L(tp):  ldr     r8, [ap, #4]!
        orr     r7, r7, r6, lsl tnc
L(md):  ldr     r6, [ap, #4]!
        orr     r7, r7, r8, lsl tnc
L(ed):  orr     r7, r7, r6, lsl tnc
L(ed1): str     r7, [rp], #4
        mov     r0, r4, lsl tnc
')
        pop     {r4, r6, r7, r8}
        bx      r14
EPILOGUE()