dnl  AMD64 mpn_lshift optimised for CPUs with fast SSE including fast movdqu.

dnl  Contributed to the GNU project by Torbjorn Granlund.

dnl  Copyright 2010-2012 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.
include(`../config.m4')
C                  cycles/limb     cycles/limb     cycles/limb    good
C                   aligned         unaligned       best seen     for cpu?
C AMD K8,K9          3               3               2.35         no, use shl/shr
C AMD K10            1.5-1.8         1.5-1.8         1.33         yes
C AMD bd1            1.7-1.9         1.7-1.9         1.33         yes
C AMD bobcat         3.17            3.17                         yes, bad for n < 20
C Intel P4           4.67            4.67            2.7          no, slow movdqu
C Intel core2        2.15            2.15            1.25         no, use shld/shrd
C Intel NHM          1.66            1.66            1.25         no, use shld/shrd
C Intel SBR          1.3             1.3             1.25         yes, bad for n = 4-6
C Intel atom        11.7            11.7             4.5          no
C VIA nano           5.7             5.95            2.0          no, slow movdqu
C We try to do as many aligned 16-byte operations as possible.  The top-most
C and bottom-most writes might need 8-byte operations.
C
C This variant relies on fast movdqu loads, and uses movdqu even for aligned
C operands, in order to avoid the need for two separate loops.
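C
C For reference, the operation implemented here follows the usual mpn_lshift
C contract: shift the n-limb operand {ap,n} left by cnt bits (1 <= cnt <= 63),
C write the result to {rp,n}, and return the bits shifted out of the top limb.
C A minimal C sketch of that contract (ref_lshift and limb_t are illustrative
C names only, not part of this file or of GMP's build) is:
C
C	#include <stdint.h>
C	typedef uint64_t limb_t;
C
C	limb_t ref_lshift (limb_t *rp, const limb_t *ap, long n, unsigned cnt)
C	{
C	  limb_t retval = ap[n - 1] >> (64 - cnt);    /* bits shifted out the top */
C	  for (long i = n - 1; i > 0; i--)            /* high-to-low, so rp may equal ap */
C	    rp[i] = (ap[i] << cnt) | (ap[i - 1] >> (64 - cnt));
C	  rp[0] = ap[0] << cnt;
C	  return retval;
C	}
C
C The SSE code below evaluates the same per-limb recurrence, but two limbs at
C a time in xmm registers, which is why neighbouring limb pairs are loaded.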
C TODO
C  * Could 2-limb wind-down code be simplified?
C  * Improve basecase code, using shld/shrd for SBR, discrete integer shifts
C    for other affected CPUs.
	sub	R32(%rcx), R32(%rax)

	lea	(rp,n,8), R32(%rcx)

C Do one initial limb in order to make rp aligned
	movq	-8(ap,n,8), %xmm0
	movq	-16(ap,n,8), %xmm1

	movq	%xmm0, -8(rp,n,8)
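C The limb just written is rp[n-1] = (ap[n-1] << cnt) | (ap[n-2] >> (64-cnt));
C with it out of the way, the loop below can use aligned 16-byte stores.
C
C Each pass through L(top) then handles eight limbs: unaligned movdqu loads of
C neighbouring limb pairs are combined in xmm registers and written back with
C aligned movdqa stores.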
L(top):	movdqu	40(ap,n,8), %xmm1
	movdqu	48(ap,n,8), %xmm0

	movdqa	%xmm0, 48(rp,n,8)

	movdqu	24(ap,n,8), %xmm1
	movdqu	32(ap,n,8), %xmm0

	movdqa	%xmm0, 32(rp,n,8)

	movdqu	8(ap,n,8), %xmm1
	movdqu	16(ap,n,8), %xmm0

	movdqa	%xmm0, 16(rp,n,8)

	movdqu	-8(ap,n,8), %xmm1
	movdqu	(ap,n,8), %xmm0

	movdqa	%xmm0, (rp,n,8)
L(end):	test	$1, R8(n)
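C Wind down: the parity of the remaining limb count decides whether the last
C write is a 16-byte or an 8-byte store.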
	punpcklqdq	%xmm1, %xmm0

	movq	-8(ap,n,8), %xmm0

L(end8):movq	(ap), %xmm0
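C The bottom limb takes in no bits from below: rp[0] = ap[0] << cnt, stored
C with an 8-byte movq.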