dnl  AMD64 mpn_copyd optimised for CPUs with fast AVX.

dnl  Copyright 2003, 2005, 2007, 2011-2013, 2015 Free Software Foundation, Inc.

dnl  Contributed to the GNU project by Torbjörn Granlund.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C               cycles/limb    cycles/limb    cycles/limb     good
C                 aligned       unaligned      best seen     for cpu?
C AMD pile          4.87           4.87                         N
C Intel SBR         0.50           0.91                         N
C Intel IBR         0.50           0.65                         N
C Intel HWL         0.25           0.30                         Y
C Intel BWL         0.28           0.37                         Y

C We try to do as many 32-byte operations as possible.  The top-most and
C bottom-most writes might need 8-byte operations.  For the bulk copying, we
C write using aligned 32-byte operations, but we read with both aligned and
C unaligned 32-byte operations.
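C
C For example: if rp's write edge is only 8-byte aligned, a single 8-byte
C move fixes 16-byte alignment; if it is then only 16-byte aligned, a
C single 16-byte move fixes 32-byte alignment; the remaining bulk is
C copied with aligned 32-byte stores.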

dnl define(`vmovdqu', vlddqu)
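dnl  (If enabled, the define above turns the vmovdqu loads into vlddqu, an
dnl  alternative unaligned-load instruction that some CPUs may handle faster.)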

	cmp	$7, n			C basecase needed for correctness
	jbe	L(bc)

	test	$8, R8(rp)		C is rp 16-byte aligned?
	jz	L(a2)			C jump if rp aligned
	mov	24(up), %rax
	mov	%rax, 24(rp)
	lea	-8(up), up
	lea	-8(rp), rp
	dec	n

L(a2):	test	$16, R8(rp)		C is rp 32-byte aligned?
	jz	L(a3)			C jump if rp aligned
	vmovdqu	16(up), %xmm0
	vmovdqa	%xmm0, 16(rp)
	lea	-16(up), up
	lea	-16(rp), rp
	sub	$2, n

L(a3):	sub	$16, n
	jc	L(sma)
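
C Main loop: copy 16 limbs (128 bytes) per iteration, with unaligned reads
C and aligned writes.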
	ALIGN(16)
L(top):	vmovdqu	(up), %ymm0
	vmovdqu	-32(up), %ymm1
	vmovdqu	-64(up), %ymm2
	vmovdqu	-96(up), %ymm3
	lea	-128(up), up
	vmovdqa	%ymm0, (rp)
	vmovdqa	%ymm1, -32(rp)
	vmovdqa	%ymm2, -64(rp)
	vmovdqa	%ymm3, -96(rp)
	lea	-128(rp), rp
	sub	$16, n
	jnc	L(top)
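
C Copy the remaining n mod 16 limbs, testing one bit of n at a time and
C halving the copy size from 64 bytes down to 8.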
L(sma):	test	$8, R8(n)
	jz	1f
	vmovdqu	(up), %ymm0
	vmovdqu	-32(up), %ymm1
	lea	-64(up), up
	vmovdqa	%ymm0, (rp)
	vmovdqa	%ymm1, -32(rp)
	lea	-64(rp), rp
1:
	test	$4, R8(n)
	jz	1f
	vmovdqu	(up), %ymm0
	lea	-32(up), up
	vmovdqa	%ymm0, (rp)
	lea	-32(rp), rp
1:
	test	$2, R8(n)
	jz	1f
	vmovdqu	16(up), %xmm0
	lea	-16(up), up
	vmovdqa	%xmm0, 16(rp)
	lea	-16(rp), rp
1:
	test	$1, R8(n)
	jz	1f
	mov	24(up), %rax
	mov	%rax, 24(rp)
1:
	FUNC_EXIT()
	ret
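
C Basecase, for n <= 7 limbs (reached via the cmp at function entry); again
C copies by testing the bits of n.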
L(bc):	test	$4, R8(n)