dnl  AMD64 mpn_addmul_2 optimised for Intel Sandy Bridge.

dnl  Contributed to the GNU project by Torbjörn Granlund.
dnl  Copyright 2003-2005, 2007, 2008, 2011-2013 Free Software Foundation, Inc.
dnl  This file is part of the GNU MP Library.
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  and the GNU Lesser General Public License for more details.
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.
include(`../config.m4')
C This code is the result of running a code generation and optimisation tool
C suite written by David Harvey and Torbjörn Granlund.
C When playing with pointers, set this to $2 to fall back to conservative
C indexing in wind-down code.
C Parameter registers, System V AMD64 ABI.  The trailing comments give the
C corresponding Microsoft x64 (DOS64) argument registers, for which config.m4
C remaps these names when building the Windows flavour.
define(`rp',      `%rdi')   C rcx
define(`up',      `%rsi')   C rdx
define(`n_param', `%rdx')   C r8
define(`vp',      `%rcx')   C r9
81 PROLOGUE(mpn_addmul_2)
96 lea (up,n_param,8), up
97 lea 8(rp,n_param,8), rp
103 L(bx0): mov -8(rp,n,8), X0
113 L(b00): nop C this nop make loop go faster on SBR!
121 L(bx1): mov -8(rp,n,8), X1
131 L(b01): mov (rp,n,8), X0
155 mov X1, -16(rp,n,8) C 3
159 L(lo0): mov %rdx, w2 C 2
160 mov X0, -8(rp,n,8) C 0
173 mov 8(rp,n,8), X0 C 2
190 mov 16(rp,n,8), X1 C 3
195 mov X0, 8(rp,n,8) C 2
202 mov 24(rp,n,8), X0 C 0 useless but harmless final read
212 mov X1, I(-16(rp),-16(rp,n,8))
215 mov %rax, I(-8(rp),-8(rp,n,8))