Squashed 'src/secp256k1/' changes from 8225239..84973d3
src/scalar_impl.h
/**********************************************************************
 * Copyright (c) 2014 Pieter Wuille                                   *
 * Distributed under the MIT software license, see the accompanying   *
 * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
 **********************************************************************/

#ifndef _SECP256K1_SCALAR_IMPL_H_
#define _SECP256K1_SCALAR_IMPL_H_

#include "group.h"
#include "scalar.h"

#if defined HAVE_CONFIG_H
#include "libsecp256k1-config.h"
#endif

#if defined(EXHAUSTIVE_TEST_ORDER)
#include "scalar_low_impl.h"
#elif defined(USE_SCALAR_4X64)
#include "scalar_4x64_impl.h"
#elif defined(USE_SCALAR_8X32)
#include "scalar_8x32_impl.h"
#else
#error "Please select scalar implementation"
#endif

#ifndef USE_NUM_NONE
static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
    unsigned char c[32];
    secp256k1_scalar_get_b32(c, a);
    secp256k1_num_set_bin(r, c, 32);
}

/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
#if defined(EXHAUSTIVE_TEST_ORDER)
    static const unsigned char order[32] = {
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,0,
        0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
    };
#else
    static const unsigned char order[32] = {
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
        0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
        0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
        0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
    };
#endif
    secp256k1_num_set_bin(r, order, 32);
}
#endif

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(EXHAUSTIVE_TEST_ORDER)
    int i;
    *r = 0;
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
        if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
            *r = i;
    /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
     * have a composite group order; fix it in exhaustive_tests.c). */
    VERIFY_CHECK(*r != 0);
}
#else
    secp256k1_scalar *t;
    int i;
    /* First compute xN as x ^ (2^N - 1) for some values of N,
     * and uM as x ^ M for some values of M. */
    secp256k1_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126;
    secp256k1_scalar u2, u5, u9, u11, u13;
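
    /* Editorial note: the group order n is prime, so by Fermat's little
     * theorem the inverse is x^(n - 2) (mod n), with
     * n - 2 = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD036413F.
     * The chain below evaluates this fixed exponent: the squaring runs supply
     * the zero bits and the xN/uM multiplications insert the one-bit windows
     * noted in the trailing comments. */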
    secp256k1_scalar_sqr(&u2, x);
    secp256k1_scalar_mul(&x2, &u2, x);
    secp256k1_scalar_mul(&u5, &u2, &x2);
    secp256k1_scalar_mul(&x3, &u5, &u2);
    secp256k1_scalar_mul(&u9, &x3, &u2);
    secp256k1_scalar_mul(&u11, &u9, &u2);
    secp256k1_scalar_mul(&u13, &u11, &u2);

    secp256k1_scalar_sqr(&x6, &u13);
    secp256k1_scalar_sqr(&x6, &x6);
    secp256k1_scalar_mul(&x6, &x6, &u11);

    secp256k1_scalar_sqr(&x8, &x6);
    secp256k1_scalar_sqr(&x8, &x8);
    secp256k1_scalar_mul(&x8, &x8, &x2);

    secp256k1_scalar_sqr(&x14, &x8);
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(&x14, &x14);
    }
    secp256k1_scalar_mul(&x14, &x14, &x6);

    secp256k1_scalar_sqr(&x28, &x14);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x28, &x28);
    }
    secp256k1_scalar_mul(&x28, &x28, &x14);

    secp256k1_scalar_sqr(&x56, &x28);
    for (i = 0; i < 27; i++) {
        secp256k1_scalar_sqr(&x56, &x56);
    }
    secp256k1_scalar_mul(&x56, &x56, &x28);

    secp256k1_scalar_sqr(&x112, &x56);
    for (i = 0; i < 55; i++) {
        secp256k1_scalar_sqr(&x112, &x112);
    }
    secp256k1_scalar_mul(&x112, &x112, &x56);

    secp256k1_scalar_sqr(&x126, &x112);
    for (i = 0; i < 13; i++) {
        secp256k1_scalar_sqr(&x126, &x126);
    }
    secp256k1_scalar_mul(&x126, &x126, &x14);

    /* Then accumulate the final result (t starts at x126). */
    t = &x126;
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 3; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u5); /* 101 */
    for (i = 0; i < 10; i++) { /* 0000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 4; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x3); /* 111 */
    for (i = 0; i < 9; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
    for (i = 0; i < 5; i++) { /* 0 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u11); /* 1011 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 5; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &x2); /* 11 */
    for (i = 0; i < 6; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 10; i++) { /* 000000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u13); /* 1101 */
    for (i = 0; i < 4; i++) {
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, &u9); /* 1001 */
    for (i = 0; i < 6; i++) { /* 00000 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(t, t, x); /* 1 */
    for (i = 0; i < 8; i++) { /* 00 */
        secp256k1_scalar_sqr(t, t);
    }
    secp256k1_scalar_mul(r, t, &x6); /* 111111 */
}
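
/* Illustrative check (editorial note, mirroring the CHECK used in
 * secp256k1_scalar_inverse_var below): for any nonzero scalar s, multiplying
 * the result back into the input must yield one.
 *
 *     secp256k1_scalar inv, chk;
 *     secp256k1_scalar_inverse(&inv, &s);
 *     secp256k1_scalar_mul(&chk, &inv, &s);
 *     VERIFY_CHECK(secp256k1_scalar_is_one(&chk));
 */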

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}
#endif

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
#if defined(USE_SCALAR_INV_BUILTIN)
    secp256k1_scalar_inverse(r, x);
#elif defined(USE_SCALAR_INV_NUM)
    unsigned char b[32];
    secp256k1_num n, m;
    secp256k1_scalar t = *x;
    secp256k1_scalar_get_b32(b, &t);
    secp256k1_num_set_bin(&n, b, 32);
    secp256k1_scalar_order_get_num(&m);
    secp256k1_num_mod_inverse(&n, &n, &m);
    secp256k1_num_get_bin(b, 32, &n);
    secp256k1_scalar_set_b32(r, b, NULL);
    /* Verify that the inverse was computed correctly, without GMP code. */
    secp256k1_scalar_mul(&t, &t, r);
    CHECK(secp256k1_scalar_is_one(&t));
#else
#error "Please select scalar inverse implementation"
#endif
}
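
/* Editorial note: USE_SCALAR_INV_BUILTIN reuses the fixed exponentiation
 * chain above, so even the _var variant runs in constant time;
 * USE_SCALAR_INV_NUM instead delegates to the secp256k1_num modular inverse
 * (typically GMP-backed and variable time), with the CHECK above guarding
 * the externally computed result. */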

#ifdef USE_ENDOMORPHISM
#if defined(EXHAUSTIVE_TEST_ORDER)
/**
 * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
 * full case we don't bother making k1 and k2 be small, we just want them to be
 * nontrivial to get full test coverage for the exhaustive tests. We therefore
 * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
 */
static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
    *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
}
#else
/**
 * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
 * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
 *            0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
 *
 * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
 * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
 * and k2 have a small size.
 * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
 *
 * - a1 =  {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
 * - a2 =  {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
 * - b2 =  {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
 *
 * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
 * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
 * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
 *
 * g1, g2 are precomputed constants used to replace division with a rounded multiplication
 * when decomposing the scalar for an endomorphism-based point multiplication.
 *
 * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
 * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
 *
 * The derivation is described in the paper "Efficient Software Implementation of Public-Key
 * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
 * Section 4.3 (here we use a somewhat higher-precision estimate):
 *   d = a1*b2 - b1*a2
 *   g1 = round((2^272)*b2/d)
 *   g2 = round((2^272)*b1/d)
 *
 * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
 * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
 *
 * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
 */

static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
    secp256k1_scalar c1, c2;
    static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
        0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
        0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
    );
    static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
        0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
        0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
    );
    static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
        0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
        0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
    );
    static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
        0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
        0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
    );
    static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
        0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
        0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
    );
    VERIFY_CHECK(r1 != a);
    VERIFY_CHECK(r2 != a);
    /* these _var calls are constant time since the shift amount is constant */
    secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
    secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
    secp256k1_scalar_mul(&c1, &c1, &minus_b1);
    secp256k1_scalar_mul(&c2, &c2, &minus_b2);
    secp256k1_scalar_add(r2, &c1, &c2);
    secp256k1_scalar_mul(r1, r2, &minus_lambda);
    secp256k1_scalar_add(r1, r1, a);
}
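
/* Illustrative usage sketch (editorial, simplified from ecmult_impl.h): the
 * split roughly halves the scalar length for point multiplication, since
 * a*P == r1*P + r2*lambda(P) and applying lambda to a point costs only one
 * field multiplication:
 *
 *     secp256k1_scalar r1, r2;
 *     secp256k1_ge p_lam;
 *     secp256k1_scalar_split_lambda(&r1, &r2, &a);
 *     secp256k1_ge_mul_lambda(&p_lam, &p);  // (beta*x, y), see group_impl.h
 *     // ...then combine r1*p and r2*p_lam in one Strauss-style double
 *     // multiplication, as ecmult_impl.h does under USE_ENDOMORPHISM.
 */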
#endif
#endif

#endif