/**********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                             *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/
#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
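/* Since the order n is close to 2^256, its complement 2^256 - n fits in 129 bits,
 * so only the three low limbs are needed (limb 2 is exactly 1, limb 3 is zero).
 * The reductions below rely on the identity 2^256 == 2^256 - n (mod n): any
 * overflow past 2^256 can be folded back in by adding these limbs. */
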
/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
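/* These are the limbs of n >> 1, i.e. (n - 1) / 2 since n is odd. They are only
 * used by secp256k1_scalar_is_high below to test whether a scalar exceeds half
 * the group order. */
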
SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
    r->d[0] = 0;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}
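
/* Example: secp256k1_scalar_get_bits(a, 0, 4) returns the lowest 4 bits of a.
 * The VERIFY_CHECK above requires the requested window to lie within a single
 * 64-bit limb; the _var variant below handles windows that straddle limbs. */
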
SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
    no |= (a->d[2] < SECP256K1_N_2);
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1);
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}
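
/* The comparison against n above is branch-free: scanning from the most
 * significant limb, "no" latches to 1 once some limb is strictly below the
 * corresponding limb of n, and "yes" latches to 1 once some limb is strictly
 * above it while no higher limb has already decided the comparison. */
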
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    uint128_t t;
    VERIFY_CHECK(overflow <= 1);
    t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint64_t)r->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
    return overflow;
}
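
/* When overflow is 1, adding 2^256 - n modulo 2^256 subtracts n exactly once;
 * when overflow is 0 the limbs pass through unchanged. Either way the same
 * sequence of additions runs, keeping the reduction constant time. */
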
static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    uint128_t t = (uint128_t)a->d[0] + b->d[0];
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[1] + b->d[1];
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[2] + b->d[2];
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)a->d[3] + b->d[3];
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    overflow = t + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}
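
/* For reduced inputs a, b < n the sum is below 2n, so a carry out of 2^256 and
 * a result >= n cannot both occur; the combined "overflow" is therefore 0 or 1,
 * as the VERIFY_CHECK asserts, and one conditional subtraction of n suffices. */
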
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    uint128_t t;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) flag - 1) & 0x100;  /* forcing (bit >> 6) > 3 makes this a noop */
    t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
    t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
    VERIFY_CHECK((t >> 64) == 0);
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
}
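
/* The flag handling above is constant time: when flag is 0, ((uint32_t)flag - 1)
 * is all ones, so bit is pushed past 255 and none of the (bit >> 6) == k tests
 * match, turning the addition into a no-op while still executing every limb
 * operation. When flag is 1, bit is left unchanged and 2^bit is added. */
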
static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
    r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
    r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
    r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
    r->d[0] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
    r->d[1] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
    r->d[2] = t & nonzero; t >>= 64;
    t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
    r->d[3] = t & nonzero;
}
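
/* Since (~a) + 1 == 2^256 - a, adding the limbs of n on top yields n - a modulo
 * 2^256, which is the negation of a nonzero reduced scalar. Masking every limb
 * with "nonzero" maps the special case a == 0 back to 0. */
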
SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_H_3);
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If we are flag = 0, mask = 00...00 and this is a no-op;
     * if we are flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate */
    uint64_t mask = !flag - 1;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
    r->d[0] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
    r->d[1] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
    r->d[2] = t & nonzero; t >>= 64;
    t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
    r->d[3] = t & nonzero;
    return 2 * (mask == 0) - 1;
}
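
/* XOR-ing every limb with an all-ones mask and adding (n + 1) & mask reproduces
 * the (~r) + n + 1 computation of secp256k1_scalar_negate, while a zero mask
 * leaves r untouched; both paths cost the same. The return value is -1 if the
 * negation was performed and 1 otherwise. */
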
/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)a * b; \
        th = t >> 64;         /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = t; \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* overflow is handled on the next line */ \
    c2 += (c1 < th) ? 1 : 0;  /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        uint128_t t = (uint128_t)a * b; \
        th = t >> 64;         /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = t; \
    } \
    c0 += tl;                 /* overflow is handled on the next line */ \
    th += (c0 < tl) ? 1 : 0;  /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th;                 /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd2(a,b) { \
    uint64_t tl, th, th2, tl2; \
    { \
        uint128_t t = (uint128_t)a * b; \
        th = t >> 64;               /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = t; \
    } \
    th2 = th + th;                  /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
    c2 += (th2 < th) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
    tl2 = tl + tl;                  /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
    th2 += (tl2 < tl) ? 1 : 0;      /* at most 0xFFFFFFFFFFFFFFFF */ \
    c0 += tl2;                      /* overflow is handled on the next line */ \
    th2 += (c0 < tl2) ? 1 : 0;      /* second overflow is handled on the next line */ \
    c2 += (c0 < tl2) & (th2 == 0);  /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
    c1 += th2;                      /* overflow is handled on the next line */ \
    c2 += (c1 < th2) ? 1 : 0;       /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a);                  /* overflow is handled on the next line */ \
    over = (c0 < (a)) ? 1 : 0; \
    c1 += over;                 /* overflow is handled on the next line */ \
    c2 += (c1 < over) ? 1 : 0;  /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a);                 /* overflow is handled on the next line */ \
    c1 += (c0 < (a)) ? 1 : 0;  /* never overflows by contract (verified the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}
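
/* The triple (c0,c1,c2) acts as an accumulator holding the value
 * c0 + c1*2^64 + c2*2^128. muladd/muladd2/sumadd add into it with explicit
 * carry propagation, extract shifts it down by one 64-bit limb, and the _fast
 * variants skip the c2 handling when the caller can guarantee it stays zero. */
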
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* Initialize r8,r9,r10 */
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += n0 * c0 */
    "addq 8(%%rsi), %%r9\n"
    /* (r9,r10,r8) += n1 * c0 */
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += n0 * c1 */
    "adcq %%rdx, %%r10\n"
    /* (r10,r8,r9) += l2 */
    "addq 16(%%rsi), %%r10\n"
    /* (r10,r8,r9) += n2 * c0 */
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += n1 * c1 */
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += n0 */
    "addq %%r11, %%r10\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += l3 */
    "addq 24(%%rsi), %%r8\n"
    /* (r8,r9,r10) += n3 * c0 */
    /* (r8,r9,r10) += n2 * c1 */
    /* (r8,r9,r10) += n1 */
    /* (r9,r10,r8) += n3 * c1 */
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += n2 */
    "addq %%r14, %%r10\n"
    : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");

    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* Initialize (r8,r9,r10) */
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += m4 * c0 */
    /* (r9,r10,r8) += m5 * c0 */
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += m4 * c1 */
    "adcq %%rdx, %%r10\n"
    /* (r10,r8,r9) += m2 */
    /* (r10,r8,r9) += m6 * c0 */
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += m5 * c1 */
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += m4 */
    "addq %%r11, %%r10\n"
    /* (r8,r9) += m6 * c1 */
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");

    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* (rax,rdx) = p4 * c0 */
    /* (rax,rdx) += p0 */
    "movq %%rax, 0(%q6)\n"
    /* Move to (r8,r9) */
    /* (r8,r9) += p4 * c1 */
    "movq %%r8, 8(%q6)\n"
    "movq %%r9, 16(%q6)\n"
    "movq %%r8, 24(%q6)\n"
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    uint128_t c;
    uint64_t c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
    r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
    r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p2 + (uint128_t)p4;
    r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
    c += p3;
    r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}
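
/* The reduction works in three stages: the top four limbs of the 512-bit input
 * are multiplied by SECP256K1_N_C (= 2^256 - n) and folded into the bottom four
 * (512 -> 385 bits), the same folding is applied twice more (385 -> 258 and
 * 258 -> 256), and a final secp256k1_scalar_reduce handles the last possible
 * subtraction of n. */
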
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx) */
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a0 * b1 */
    "movq %%r15, %%rax\n"
    /* (r8,r9,r10) += a1 * b0 */
    "movq %%rbx, %%rax\n"
    "movq %%r8, 8(%%rsi)\n"
    /* (r9,r10,r8) += a0 * b2 */
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += a1 * b1 */
    "movq %%rbx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += a2 * b0 */
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 16(%%rsi)\n"
    /* (r10,r8,r9) += a0 * b3 */
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq 24(%%rdi), %%r15\n"
    /* (r10,r8,r9) += a1 * b2 */
    "movq %%rbx, %%rax\n"
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += a2 * b1 */
    "movq %%rcx, %%rax\n"
    "addq %%rax, %%r10\n"
    /* (r10,r8,r9) += a3 * b0 */
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a1 * b3 */
    "movq %%rbx, %%rax\n"
    /* (r8,r9,r10) += a2 * b2 */
    "movq %%rcx, %%rax\n"
    /* (r8,r9,r10) += a3 * b1 */
    "movq %%r15, %%rax\n"
    "movq %%r8, 32(%%rsi)\n"
    /* (r9,r10,r8) += a2 * b3 */
    "movq %%rcx, %%rax\n"
    "adcq %%rdx, %%r10\n"
    /* (r9,r10,r8) += a3 * b2 */
    "movq %%r15, %%rax\n"
    "adcq %%rdx, %%r10\n"
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * b3 */
    "movq %%r15, %%rax\n"
    "addq %%rax, %%r10\n"
    "movq %%r10, 48(%%rsi)\n"
    "movq %%r8, 56(%%rsi)\n"
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;
#endif
}

static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
#ifdef USE_ASM_X86_64
    __asm__ __volatile__(
750 "movq 0(%%rdi), %%r11\n"
751 "movq 8(%%rdi), %%r12\n"
752 "movq 16(%%rdi), %%r13\n"
753 "movq 24(%%rdi), %%r14\n"
754 /* (rax,rdx) = a0 * a0 */
755 "movq %%r11, %%rax\n"
758 "movq %%rax, 0(%%rsi)\n"
759 /* (r8,r9,r10) = (rdx,0) */
762 "xorq %%r10, %%r10\n"
763 /* (r8,r9,r10) += 2 * a0 * a1 */
764 "movq %%r11, %%rax\n"
773 "movq %%r8, 8(%%rsi)\n"
775 /* (r9,r10,r8) += 2 * a0 * a2 */
776 "movq %%r11, %%rax\n"
779 "adcq %%rdx, %%r10\n"
782 "adcq %%rdx, %%r10\n"
784 /* (r9,r10,r8) += a1 * a1 */
785 "movq %%r12, %%rax\n"
788 "adcq %%rdx, %%r10\n"
791 "movq %%r9, 16(%%rsi)\n"
793 /* (r10,r8,r9) += 2 * a0 * a3 */
794 "movq %%r11, %%rax\n"
796 "addq %%rax, %%r10\n"
799 "addq %%rax, %%r10\n"
802 /* (r10,r8,r9) += 2 * a1 * a2 */
803 "movq %%r12, %%rax\n"
805 "addq %%rax, %%r10\n"
808 "addq %%rax, %%r10\n"
812 "movq %%r10, 24(%%rsi)\n"
813 "xorq %%r10, %%r10\n"
814 /* (r8,r9,r10) += 2 * a1 * a3 */
815 "movq %%r12, %%rax\n"
823 /* (r8,r9,r10) += a2 * a2 */
824 "movq %%r13, %%rax\n"
830 "movq %%r8, 32(%%rsi)\n"
832 /* (r9,r10,r8) += 2 * a2 * a3 */
833 "movq %%r13, %%rax\n"
836 "adcq %%rdx, %%r10\n"
839 "adcq %%rdx, %%r10\n"
842 "movq %%r9, 40(%%rsi)\n"
843 /* (r10,r8) += a3 * a3 */
844 "movq %%r14, %%rax\n"
846 "addq %%rax, %%r10\n"
849 "movq %%r10, 48(%%rsi)\n"
851 "movq %%r8, 56(%%rsi)\n"
854 : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * a[0..3]. */
    muladd_fast(a->d[0], a->d[0]);
    extract_fast(l[0]);
    muladd2(a->d[0], a->d[1]);
    extract(l[1]);
    muladd2(a->d[0], a->d[2]);
    muladd(a->d[1], a->d[1]);
    extract(l[2]);
    muladd2(a->d[0], a->d[3]);
    muladd2(a->d[1], a->d[2]);
    extract(l[3]);
    muladd2(a->d[1], a->d[3]);
    muladd(a->d[2], a->d[2]);
    extract(l[4]);
    muladd2(a->d[2], a->d[3]);
    extract(l[5]);
    muladd_fast(a->d[3], a->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;
#endif
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef muladd2
#undef extract
#undef extract_fast

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}

static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t l[8];
    secp256k1_scalar_sqr_512(l, a);
    secp256k1_scalar_reduce_512(r, l);
}

#ifdef USE_ENDOMORPHISM
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    r1->d[0] = a->d[0];
    r1->d[1] = a->d[1];
    r1->d[2] = 0;
    r1->d[3] = 0;
    r2->d[0] = a->d[2];
    r2->d[1] = a->d[3];
    r2->d[2] = 0;
    r2->d[3] = 0;
}
#endif

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}
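
/* The final cadd_bit adds bit (shift - 1) of the 512-bit product, i.e. the most
 * significant bit that was shifted out, so r holds (a * b) / 2^shift rounded to
 * the nearest integer rather than simply truncated. */
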
#endif /* SECP256K1_SCALAR_REPR_IMPL_H */