dnl  Contributed to the GNU project by Torbjörn Granlund and Niels Möller.
dnl  Copyright 2009-2012, 2014 Free Software Foundation, Inc.
dnl  This file is part of the GNU MP Library.
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl  or both in parallel, as here.
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.
include(`../config.m4')
C Intel SBR      8.4  (slowdown, old code took 8.0)
C Register aliases (m4 macros).  The extraction had split each one-line
C define across three physical lines, embedding newlines inside the quoted
C macro bodies — every expansion site would then receive a broken operand.
define(`B2mb',   `%r10')
define(`B2modb', `%r11')
define(`r0',     `%rbp')   C r1 kept in %rax
define(`r2',     `%rcx')   C kept negated. Also used as shift count
C mpn_mod_1_1p (mp_srcptr ap, mp_size_t n, mp_limb_t b, mp_limb_t bmodb[4])
C
C The pre array contains bi, cnt, B1modb, B2modb
C Note: This implementation needs B1modb only when cnt > 0
C The iteration is almost as follows,
C
C   r_2 B^3 + r_1 B^2 + r_0 B + u = r_1 B2modb + (r_0 + r_2 B2mod) B + u
C
C where r2 is a single bit represented as a mask. But to make sure that the
C result fits in two limbs and a bit, carry from the addition
C is handled specially. On carry, we subtract b to cancel the carry,
C and we use instead the value
C
C This addition can be issued early since it doesn't depend on r2, and it is
C the source of the cmov in the loop.
C We have the invariant that r_2 B^2 + r_1 B + r_0 < B^2 + B b
87 PROLOGUE
(mpn_mod_1_1p
)
94 mov -8(ap
, n
, 8), %rax
C First iteration, no r2
104 mov -24(ap
, n
, 8), r0
106 mov -16(ap
, n
, 8), %rax
116 L
(top
): and B2modb
, r2
135 mov 8(pre
), R32
(%rcx
)
136 test R32
(%rcx
), R32
(%rcx
)
C Unnormalized, use B1modb to reduce to size < B (b+1)
C Left-shift to normalize
155 shld R8
(%rcx
), r0
, %rax
177 L(ok): shr R8(%rcx), %rax
188 PROLOGUE(mpn_mod_1_1p_cps)
197 mov R32(%rcx), R32(%rbp)
199 IFSTD(` mov %r12, %rdi ') C pass parameter
200 IFDOS
(`
mov %r12
, %rcx
') C pass parameter
201 ASSERT(nz, `test $15, %rsp')
202 CALL( mpn_invert_limb
)
205 mov %rax
, (%rbx
) C store bi
206 mov %rbp
, 8(%rbx
) C store cnt
208 mov %r12
, 24(%rbx
) C store B2modb
209 mov R32
(%rbp
), R32
(%rcx
)
210 test R32
(%rcx
), R32
(%rcx
)
C Destroys %rax, unlike shld. Otherwise, we could do B1modb
C before B2modb, and get rid of the move %r12, %r8 above.
224 shld R8
(%rcx
), %rax
, %rdx
228 mov %r8, 16(%rbx) C store B1modb