/*
 * Copyright (c) 2013, Kenneth MacKay
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>

#include "ecc.h"
#include "ecc_curve_defs.h"

typedef struct {
	u64 m_low;
	u64 m_high;
} uint128_t;

static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
	switch (curve_id) {
	/* In FIPS mode only allow P256 and higher */
	case ECC_CURVE_NIST_P192:
		return fips_enabled ? NULL : &nist_p192;
	case ECC_CURVE_NIST_P256:
		return &nist_p256;
	default:
		return NULL;
	}
}

static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
	size_t len = ndigits * sizeof(u64);

	if (!len)
		return NULL;

	return kmalloc(len, GFP_KERNEL);
}

static void ecc_free_digits_space(u64 *space)
{
	kzfree(space);
}

static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
	struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	p->x = ecc_alloc_digits_space(ndigits);
	if (!p->x)
		goto err_alloc_x;

	p->y = ecc_alloc_digits_space(ndigits);
	if (!p->y)
		goto err_alloc_y;

	p->ndigits = ndigits;

	return p;

err_alloc_y:
	ecc_free_digits_space(p->x);
err_alloc_x:
	kfree(p);
	return NULL;
}

static void ecc_free_point(struct ecc_point *p)
{
	if (!p)
		return;

	kzfree(p->x);
	kzfree(p->y);
	kzfree(p);
}

static void vli_clear(u64 *vli, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		vli[i] = 0;
}

/* Returns true if vli == 0, false otherwise. */
static bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++) {
		if (vli[i])
			return false;
	}

	return true;
}

/* Returns nonzero if bit 'bit' of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
	return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}
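
/* Example (illustrative, not in the original source): for a 192-bit value,
 * vli_test_bit(vli, 67) selects digit vli[67 / 64] = vli[1] and masks bit
 * 67 % 64 = 3, i.e. it returns vli[1] & 0x8, nonzero exactly when bit 67
 * of the value is set.
 */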

/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
	int i;

	/* Search from the end until we find a non-zero digit.
	 * We do it in reverse because we expect that most digits will
	 * be nonzero.
	 */
	for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);

	return (i + 1);
}

/* Counts the number of bits required for vli. */
static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
	unsigned int i, num_digits;
	u64 digit;

	num_digits = vli_num_digits(vli, ndigits);
	if (num_digits == 0)
		return 0;

	digit = vli[num_digits - 1];
	for (i = 0; digit; i++)
		digit >>= 1;

	return ((num_digits - 1) * 64 + i);
}
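
/* Example (illustrative): for the two-digit value { 0x0, 0x5 }, stored
 * least-significant digit first, vli_num_digits() returns 2 and the loop
 * shifts the top digit 0x5 right three times before it becomes zero, so
 * vli_num_bits() returns (2 - 1) * 64 + 3 = 67 bits.
 */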

/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		dest[i] = src[i];
}

/* Returns sign of left - right. */
static int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
	int i;

	for (i = ndigits - 1; i >= 0; i--) {
		if (left[i] > right[i])
			return 1;
		else if (left[i] < right[i])
			return -1;
	}

	return 0;
}

/* Computes result = in << shift, returning carry. Can modify in place
 * (if result == in). 0 < shift < 64.
 */
static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
		      unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 temp = in[i];

		result[i] = (temp << shift) | carry;
		carry = temp >> (64 - shift);
	}

	return carry;
}

/* Computes vli = vli >> 1. */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
	u64 *end = vli;
	u64 carry = 0;

	vli += ndigits;
	while (vli-- > end) {
		u64 temp = *vli;
		*vli = (temp >> 1) | carry;
		carry = temp << 63;
	}
}

/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
		   unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum;

		sum = left[i] + right[i] + carry;
		if (sum != left[i])
			carry = (sum < left[i]);

		result[i] = sum;
	}

	return carry;
}

/* Computes result = left - right, returning borrow. Can modify in place. */
static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
		   unsigned int ndigits)
{
	u64 borrow = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff;

		diff = left[i] - right[i] - borrow;
		if (diff != left[i])
			borrow = (diff > left[i]);

		result[i] = diff;
	}

	return borrow;
}

static uint128_t mul_64_64(u64 left, u64 right)
{
	u64 a0 = left & 0xffffffffull;
	u64 a1 = left >> 32;
	u64 b0 = right & 0xffffffffull;
	u64 b1 = right >> 32;
	u64 m0 = a0 * b0;
	u64 m1 = a0 * b1;
	u64 m2 = a1 * b0;
	u64 m3 = a1 * b1;
	uint128_t result;

	m2 += (m0 >> 32);
	m2 += m1;

	/* Overflow */
	if (m2 < m1)
		m3 += 0x100000000ull;

	result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
	result.m_high = m3 + (m2 >> 32);

	return result;
}
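
/* Illustrative derivation (not in the original source): writing
 * left = a1 * 2^32 + a0 and right = b1 * 2^32 + b0, the 128-bit product is
 *
 *   left * right = m3 * 2^64 + (m1 + m2 + (m0 >> 32)) * 2^32
 *                  + (m0 & 0xffffffff)
 *
 * Each 32x32 partial product fits in 64 bits, so only the accumulation
 * into m2 can wrap; a wrap loses 2^64 at weight 2^32 (2^96 overall), which
 * is restored by adding 0x100000000ull (2^32) to m3, whose weight is 2^64.
 */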

static uint128_t add_128_128(uint128_t a, uint128_t b)
{
	uint128_t result;

	result.m_low = a.m_low + b.m_low;
	result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);

	return result;
}

static void vli_mult(u64 *result, const u64 *left, const u64 *right,
		     unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	unsigned int i, k;

	/* Compute each digit of result in sequence, maintaining the
	 * carries.
	 */
	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		for (i = min; i <= k && i < ndigits; i++) {
			uint128_t product;

			product = mul_64_64(left[i], right[k - i]);

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}
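
/* Illustrative note: this is the "product scanning" schoolbook method.
 * For ndigits = 2, column k = 0 accumulates left[0] * right[0], column
 * k = 1 accumulates left[0] * right[1] + left[1] * right[0], and so on;
 * the running column sum lives in the 192-bit accumulator (r2, r01.m_high,
 * r01.m_low) and one 64-bit result digit is emitted per column.
 */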

static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	unsigned int i, k;

	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		for (i = min; i <= k && i <= k - i; i++) {
			uint128_t product;

			product = mul_64_64(left[i], left[k - i]);

			if (i < k - i) {
				r2 += product.m_high >> 63;
				product.m_high = (product.m_high << 1) |
						 (product.m_low >> 63);
				product.m_low <<= 1;
			}

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}

/* Computes result = (left + right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 carry;

	carry = vli_add(result, left, right, ndigits);

	/* result > mod (result = mod + remainder), so subtract mod to
	 * get remainder.
	 */
	if (carry || vli_cmp(result, mod, ndigits) >= 0)
		vli_sub(result, result, mod, ndigits);
}

/* Computes result = (left - right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 borrow = vli_sub(result, left, right, ndigits);

	/* In this case, result == -diff == (max int) - diff.
	 * Since -x % d == d - x, we can get the correct result from
	 * result + mod (with overflow).
	 */
	if (borrow)
		vli_add(result, result, mod, ndigits);
}

/* Computes result = product % curve_prime.
 * See algorithm 5 and 6 from
 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
 */
static void vli_mmod_fast_192(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	const unsigned int ndigits = 3;
	int carry;

	vli_set(result, product, ndigits);

	vli_set(tmp, &product[3], ndigits);
	carry = vli_add(result, result, tmp, ndigits);

	tmp[0] = 0;
	tmp[1] = product[3];
	tmp[2] = product[4];
	carry += vli_add(result, result, tmp, ndigits);

	tmp[0] = tmp[1] = product[5];
	tmp[2] = 0;
	carry += vli_add(result, result, tmp, ndigits);

	while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
		carry -= vli_sub(result, result, curve_prime, ndigits);
}
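
/* Illustrative derivation (assuming p = 2^192 - 2^64 - 1, the P-192 prime):
 * since 2^192 == 2^64 + 1 (mod p), a 384-bit product with 64-bit digits
 * c0..c5 satisfies
 *
 *   product == (c2,c1,c0) + (c5,c4,c3) + (c4,c3,0) + (0,c5,c5) (mod p)
 *
 * with each triple written most-significant digit first - exactly the
 * three vli_add() terms above. The closing while loop then subtracts p at
 * most a few times to bring the sum into [0, p).
 */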

/* Computes result = product % curve_prime
 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
 */
static void vli_mmod_fast_256(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	int carry;
	const unsigned int ndigits = 4;

	/* t */
	vli_set(result, product, ndigits);

	/* s1 */
	tmp[0] = 0;
	tmp[1] = product[5] & 0xffffffff00000000ull;
	tmp[2] = product[6];
	tmp[3] = product[7];
	carry = vli_lshift(tmp, tmp, 1, ndigits);
	carry += vli_add(result, result, tmp, ndigits);

	/* s2 */
	tmp[1] = product[6] << 32;
	tmp[2] = (product[6] >> 32) | (product[7] << 32);
	tmp[3] = product[7] >> 32;
	carry += vli_lshift(tmp, tmp, 1, ndigits);
	carry += vli_add(result, result, tmp, ndigits);

	/* s3 */
	tmp[0] = product[4];
	tmp[1] = product[5] & 0xffffffff;
	tmp[2] = 0;
	tmp[3] = product[7];
	carry += vli_add(result, result, tmp, ndigits);

	/* s4 */
	tmp[0] = (product[4] >> 32) | (product[5] << 32);
	tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
	tmp[2] = product[7];
	tmp[3] = (product[6] >> 32) | (product[4] << 32);
	carry += vli_add(result, result, tmp, ndigits);

	/* d1 */
	tmp[0] = (product[5] >> 32) | (product[6] << 32);
	tmp[1] = (product[6] >> 32);
	tmp[2] = 0;
	tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d2 */
	tmp[0] = product[6];
	tmp[1] = product[7];
	tmp[2] = 0;
	tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d3 */
	tmp[0] = (product[6] >> 32) | (product[7] << 32);
	tmp[1] = (product[7] >> 32) | (product[4] << 32);
	tmp[2] = (product[4] >> 32) | (product[5] << 32);
	tmp[3] = (product[6] << 32);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d4 */
	tmp[0] = product[7];
	tmp[1] = product[4] & 0xffffffff00000000ull;
	tmp[2] = product[5];
	tmp[3] = product[6] & 0xffffffff00000000ull;
	carry -= vli_sub(result, result, tmp, ndigits);

	if (carry < 0) {
		do {
			carry += vli_add(result, result, curve_prime, ndigits);
		} while (carry < 0);
	} else {
		while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
			carry -= vli_sub(result, result, curve_prime, ndigits);
	}
}
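
/* Illustrative note (assuming p = 2^256 - 2^224 + 2^192 + 2^96 - 1, the
 * P-256 prime): folding the upper half of the product through
 * 2^256 == 2^224 - 2^192 - 2^96 + 1 (mod p) yields the fixed terms above -
 * t plus doubled s1, s2 and single s3, s4, minus d1..d4 - mirroring the
 * reduction in the referenced NIST routines document. Because terms are
 * subtracted, the running carry may go negative, hence the corrective
 * branch that adds curve_prime back until the result lands in [0, p).
 */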

/* Computes result = product % curve_prime
 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
 */
static bool vli_mmod_fast(u64 *result, u64 *product,
			  const u64 *curve_prime, unsigned int ndigits)
{
	u64 tmp[2 * ndigits];

	switch (ndigits) {
	case 3:
		vli_mmod_fast_192(result, product, curve_prime, tmp);
		break;
	case 4:
		vli_mmod_fast_256(result, product, curve_prime, tmp);
		break;
	default:
		pr_err("unsupported digits size!\n");
		return false;
	}

	return true;
}

/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
			      const u64 *curve_prime, unsigned int ndigits)
{
	u64 product[2 * ndigits];

	vli_mult(product, left, right, ndigits);
	vli_mmod_fast(result, product, curve_prime, ndigits);
}

/* Computes result = left^2 % curve_prime. */
static void vli_mod_square_fast(u64 *result, const u64 *left,
				const u64 *curve_prime, unsigned int ndigits)
{
	u64 product[2 * ndigits];

	vli_square(product, left, ndigits);
	vli_mmod_fast(result, product, curve_prime, ndigits);
}

#define EVEN(vli) (!(vli[0] & 1))
/* Computes result = (1 / input) % mod. All VLIs are the same size.
 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
 */
static void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
			unsigned int ndigits)
{
	u64 a[ndigits], b[ndigits];
	u64 u[ndigits], v[ndigits];
	u64 carry;
	int cmp_result;

	if (vli_is_zero(input, ndigits)) {
		vli_clear(result, ndigits);
		return;
	}

	vli_set(a, input, ndigits);
	vli_set(b, mod, ndigits);
	vli_clear(u, ndigits);
	u[0] = 1;
	vli_clear(v, ndigits);

	while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
		carry = 0;

		if (EVEN(a)) {
			vli_rshift1(a, ndigits);

			if (!EVEN(u))
				carry = vli_add(u, u, mod, ndigits);

			vli_rshift1(u, ndigits);
			if (carry)
				u[ndigits - 1] |= 0x8000000000000000ull;
		} else if (EVEN(b)) {
			vli_rshift1(b, ndigits);

			if (!EVEN(v))
				carry = vli_add(v, v, mod, ndigits);

			vli_rshift1(v, ndigits);
			if (carry)
				v[ndigits - 1] |= 0x8000000000000000ull;
		} else if (cmp_result > 0) {
			vli_sub(a, a, b, ndigits);
			vli_rshift1(a, ndigits);

			if (vli_cmp(u, v, ndigits) < 0)
				vli_add(u, u, mod, ndigits);

			vli_sub(u, u, v, ndigits);
			if (!EVEN(u))
				carry = vli_add(u, u, mod, ndigits);

			vli_rshift1(u, ndigits);
			if (carry)
				u[ndigits - 1] |= 0x8000000000000000ull;
		} else {
			vli_sub(b, b, a, ndigits);
			vli_rshift1(b, ndigits);

			if (vli_cmp(v, u, ndigits) < 0)
				vli_add(v, v, mod, ndigits);

			vli_sub(v, v, u, ndigits);
			if (!EVEN(v))
				carry = vli_add(v, v, mod, ndigits);

			vli_rshift1(v, ndigits);
			if (carry)
				v[ndigits - 1] |= 0x8000000000000000ull;
		}
	}

	vli_set(result, u, ndigits);
}
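
/* Illustrative sketch of the invariants (assuming mod is odd and
 * gcd(input, mod) = 1, as for the NIST primes): the loop maintains
 *
 *   u * input == a (mod mod)  and  v * input == b (mod mod)
 *
 * Halving a (or b) is mirrored by halving u (or v) modulo mod: an even u
 * is simply shifted; an odd u first adds mod, and the carry out of that
 * add is re-inserted as the top bit after the shift. When a and b meet at
 * gcd(input, mod) = 1, u * input == 1 (mod mod), so u is the inverse.
 */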

/* ------ Point operations ------ */

/* Returns true if point is the point at infinity, false otherwise. */
static bool ecc_point_is_zero(const struct ecc_point *point)
{
	return (vli_is_zero(point->x, point->ndigits) &&
		vli_is_zero(point->y, point->ndigits));
}

/* Point multiplication algorithm using Montgomery's ladder with co-Z
 * coordinates. From http://eprint.iacr.org/2011/338.pdf
 */

/* Double in place */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
				      u64 *curve_prime, unsigned int ndigits)
{
	/* t1 = x, t2 = y, t3 = z */
	u64 t4[ndigits];
	u64 t5[ndigits];

	if (vli_is_zero(z1, ndigits))
		return;

	/* t4 = y1^2 */
	vli_mod_square_fast(t4, y1, curve_prime, ndigits);
	/* t5 = x1*y1^2 = A */
	vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
	/* t4 = y1^4 */
	vli_mod_square_fast(t4, t4, curve_prime, ndigits);
	/* t2 = y1*z1 = z3 */
	vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
	/* t3 = z1^2 */
	vli_mod_square_fast(z1, z1, curve_prime, ndigits);

	/* t1 = x1 + z1^2 */
	vli_mod_add(x1, x1, z1, curve_prime, ndigits);
	/* t3 = 2*z1^2 */
	vli_mod_add(z1, z1, z1, curve_prime, ndigits);
	/* t3 = x1 - z1^2 */
	vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
	/* t1 = x1^2 - z1^4 */
	vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);

	/* t3 = 2*(x1^2 - z1^4) */
	vli_mod_add(z1, x1, x1, curve_prime, ndigits);
	/* t1 = 3*(x1^2 - z1^4) */
	vli_mod_add(x1, x1, z1, curve_prime, ndigits);
	if (vli_test_bit(x1, 0)) {
		u64 carry = vli_add(x1, x1, curve_prime, ndigits);

		vli_rshift1(x1, ndigits);
		x1[ndigits - 1] |= carry << 63;
	} else {
		vli_rshift1(x1, ndigits);
	}
	/* t1 = 3/2*(x1^2 - z1^4) = B */

	/* t3 = B^2 */
	vli_mod_square_fast(z1, x1, curve_prime, ndigits);
	/* t3 = B^2 - A */
	vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
	/* t3 = B^2 - 2A = x3 */
	vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
	/* t5 = A - x3 */
	vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
	/* t1 = B * (A - x3) */
	vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
	/* t4 = B * (A - x3) - y1^4 = y3 */
	vli_mod_sub(t4, x1, t4, curve_prime, ndigits);

	vli_set(x1, z1, ndigits);
	vli_set(z1, y1, ndigits);
	vli_set(y1, t4, ndigits);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
		    unsigned int ndigits)
{
	u64 t1[ndigits];

	vli_mod_square_fast(t1, z, curve_prime, ndigits);    /* z^2 */
	vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
	vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits);  /* z^3 */
	vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
				u64 *p_initial_z, u64 *curve_prime,
				unsigned int ndigits)
{
	u64 z[ndigits];

	vli_set(x2, x1, ndigits);
	vli_set(y2, y1, ndigits);

	vli_clear(z, ndigits);
	z[0] = 1;

	if (p_initial_z)
		vli_set(z, p_initial_z, ndigits);

	apply_z(x1, y1, z, curve_prime, ndigits);

	ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);

	apply_z(x2, y2, z, curve_prime, ndigits);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
 * or P => P', Q => P + Q
 */
static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
		     unsigned int ndigits)
{
	/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
	u64 t5[ndigits];

	/* t5 = x2 - x1 */
	vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
	/* t5 = (x2 - x1)^2 = A */
	vli_mod_square_fast(t5, t5, curve_prime, ndigits);
	/* t1 = x1*A = B */
	vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
	/* t3 = x2*A = C */
	vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
	/* t4 = y2 - y1 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
	/* t5 = (y2 - y1)^2 = D */
	vli_mod_square_fast(t5, y2, curve_prime, ndigits);

	/* t5 = D - B */
	vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
	/* t5 = D - B - C = x3 */
	vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
	/* t3 = C - B */
	vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
	/* t2 = y1*(C - B) */
	vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
	/* t3 = B - x3 */
	vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
	/* t4 = (y2 - y1)*(B - x3) */
	vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
	/* t4 = y3 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	vli_set(x2, t5, ndigits);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
 * or P => P - Q, Q => P + Q
 */
static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
		       unsigned int ndigits)
{
	/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
	u64 t5[ndigits];
	u64 t6[ndigits];
	u64 t7[ndigits];

	/* t5 = x2 - x1 */
	vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
	/* t5 = (x2 - x1)^2 = A */
	vli_mod_square_fast(t5, t5, curve_prime, ndigits);
	/* t1 = x1*A = B */
	vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
	/* t3 = x2*A = C */
	vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
	/* t4 = y2 + y1 */
	vli_mod_add(t5, y2, y1, curve_prime, ndigits);
	/* t4 = y2 - y1 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	/* t6 = C - B */
	vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
	/* t2 = y1 * (C - B) */
	vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
	/* t6 = B + C */
	vli_mod_add(t6, x1, x2, curve_prime, ndigits);
	/* t3 = (y2 - y1)^2 */
	vli_mod_square_fast(x2, y2, curve_prime, ndigits);
	/* t3 = x3 */
	vli_mod_sub(x2, x2, t6, curve_prime, ndigits);

	/* t7 = B - x3 */
	vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
	/* t4 = (y2 - y1)*(B - x3) */
	vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
	/* t4 = y3 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	/* t7 = (y2 + y1)^2 = F */
	vli_mod_square_fast(t7, t5, curve_prime, ndigits);
	/* t7 = x3' */
	vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
	/* t6 = x3' - B */
	vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
	/* t6 = (y2 + y1)*(x3' - B) */
	vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
	/* t2 = y3' */
	vli_mod_sub(y1, t6, y1, curve_prime, ndigits);

	vli_set(x1, t7, ndigits);
}

static void ecc_point_mult(struct ecc_point *result,
			   const struct ecc_point *point, const u64 *scalar,
			   u64 *initial_z, u64 *curve_prime,
			   unsigned int ndigits)
{
	/* R0 and R1 */
	u64 rx[2][ndigits];
	u64 ry[2][ndigits];
	u64 z[ndigits];
	int i, nb;
	int num_bits = vli_num_bits(scalar, ndigits);

	vli_set(rx[1], point->x, ndigits);
	vli_set(ry[1], point->y, ndigits);

	xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
			    ndigits);

	for (i = num_bits - 2; i > 0; i--) {
		nb = !vli_test_bit(scalar, i);
		xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
			   ndigits);
		xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
			 ndigits);
	}

	nb = !vli_test_bit(scalar, 0);
	xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
		   ndigits);

	/* Find final 1/Z value. */
	/* X1 - X0 */
	vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
	/* Yb * (X1 - X0) */
	vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
	/* xP * Yb * (X1 - X0) */
	vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);

	/* 1 / (xP * Yb * (X1 - X0)) */
	vli_mod_inv(z, z, curve_prime, point->ndigits);

	/* yP / (xP * Yb * (X1 - X0)) */
	vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
	/* Xb * yP / (xP * Yb * (X1 - X0)) */
	vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
	/* End 1/Z calculation */

	xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);

	apply_z(rx[0], ry[0], z, curve_prime, ndigits);

	vli_set(result->x, rx[0], ndigits);
	vli_set(result->y, ry[0], ndigits);
}
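
/* Illustrative note: this is the Montgomery ladder from the paper cited
 * above, with rx[0]/ry[0] and rx[1]/ry[1] acting as the ladder registers
 * R0 and R1. Each scalar bit executes the same xycz_add_c()/xycz_add()
 * pair whatever its value - only the nb index swaps - giving a regular,
 * largely bit-independent operation sequence.
 */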

static inline void ecc_swap_digits(const u64 *in, u64 *out,
				   unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		out[i] = __swab64(in[ndigits - 1 - i]);
}
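
/* Example (illustrative): with ndigits = 2 and
 * in[] = { 0x1122334455667788, 0x99aabbccddeeff00 } (least-significant
 * digit first), the output is
 * out[] = { 0x00ffeeddccbbaa99, 0x8877665544332211 }: both the digit order
 * and the byte order within each digit are reversed, converting between
 * the little-endian vli layout and the big-endian byte order used at the
 * API boundary.
 */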

int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
		     const u64 *private_key, unsigned int private_key_len)
{
	int nbytes;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key)
		return -EINVAL;

	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	if (private_key_len != nbytes)
		return -EINVAL;

	if (vli_is_zero(private_key, ndigits))
		return -EINVAL;

	/* Make sure the private key is in the range [1, n-1]. */
	if (vli_cmp(curve->n, private_key, ndigits) != 1)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(ecc_is_key_valid);

/*
 * ECC private keys are generated using the method of extra random bits,
 * equivalent to that described in FIPS 186-4, Appendix B.4.1.
 *
 * d = (c mod (n-1)) + 1    where c is a string of random bits, 64 bits
 *                          longer than requested
 * 0 <= c mod (n-1) <= n-2  and implies that
 * 1 <= d <= n-1
 *
 * This method generates a private key uniformly distributed in the range
 * [1, n-1].
 */

int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
	const struct ecc_curve *curve = ecc_get_curve(curve_id);
	u64 priv[ndigits];
	unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
	unsigned int nbits = vli_num_bits(curve->n, ndigits);
	int err;

	/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
	if (nbits < 160)
		return -EINVAL;

	/*
	 * FIPS 186-4 recommends that the private key should be obtained from a
	 * RBG with a security strength equal to or greater than the security
	 * strength associated with N.
	 *
	 * The maximum security strength identified by NIST SP800-57pt1r4 for
	 * ECC is 256 (N >= 512).
	 *
	 * This condition is met by the default RNG because it selects a favored
	 * DRBG with a security strength of 256.
	 */
	if (crypto_get_default_rng())
		return -EFAULT;

	err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
	crypto_put_default_rng();
	if (err)
		return err;

	if (vli_is_zero(priv, ndigits))
		return -EINVAL;

	/* Make sure the private key is in the range [1, n-1]. */
	if (vli_cmp(curve->n, priv, ndigits) != 1)
		return -EINVAL;

	ecc_swap_digits(priv, privkey, ndigits);

	return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);

int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
		     const u64 *private_key, u64 *public_key)
{
	int ret = 0;
	struct ecc_point *pk;
	u64 priv[ndigits];
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key || !curve) {
		ret = -EINVAL;
		goto out;
	}

	ecc_swap_digits(private_key, priv, ndigits);

	pk = ecc_alloc_point(ndigits);
	if (!pk) {
		ret = -ENOMEM;
		goto out;
	}

	ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
	if (ecc_point_is_zero(pk)) {
		ret = -EAGAIN;
		goto err_free_point;
	}

	ecc_swap_digits(pk->x, public_key, ndigits);
	ecc_swap_digits(pk->y, &public_key[ndigits], ndigits);

err_free_point:
	ecc_free_point(pk);
out:
	return ret;
}
EXPORT_SYMBOL(ecc_make_pub_key);

int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
			      const u64 *private_key, const u64 *public_key,
			      u64 *secret)
{
	int ret = 0;
	struct ecc_point *product, *pk;
	u64 priv[ndigits];
	u64 rand_z[ndigits];
	unsigned int nbytes;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key || !public_key || !curve) {
		ret = -EINVAL;
		goto out;
	}

	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	get_random_bytes(rand_z, nbytes);

	pk = ecc_alloc_point(ndigits);
	if (!pk) {
		ret = -ENOMEM;
		goto out;
	}

	product = ecc_alloc_point(ndigits);
	if (!product) {
		ret = -ENOMEM;
		goto err_alloc_product;
	}

	ecc_swap_digits(public_key, pk->x, ndigits);
	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
	ecc_swap_digits(private_key, priv, ndigits);

	ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);

	ecc_swap_digits(product->x, secret, ndigits);

	if (ecc_point_is_zero(product))
		ret = -EFAULT;

	ecc_free_point(product);
err_alloc_product:
	ecc_free_point(pk);
out:
	return ret;
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
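
/*
 * Usage sketch (illustrative only; ecdh_example() is hypothetical and not
 * part of this file): a caller such as the ecdh module would combine the
 * exported helpers roughly as below. Sizes assume ECC_CURVE_NIST_P256 with
 * ndigits = 4, i.e. a 4-digit private key and an 8-digit (x, y) public key.
 */
#if 0
static int ecdh_example(void)
{
	u64 privkey[4], pubkey[8], secret[4];
	int ret;

	/* d = random scalar in [1, n-1] */
	ret = ecc_gen_privkey(ECC_CURVE_NIST_P256, 4, privkey);
	if (ret)
		return ret;

	/* Q = d * G */
	ret = ecc_make_pub_key(ECC_CURVE_NIST_P256, 4, privkey, pubkey);
	if (ret)
		return ret;

	/* Shared secret with a peer's public key (own key reused here
	 * purely for illustration): x-coordinate of d * Q_peer.
	 */
	return crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, 4,
					 privkey, pubkey, secret);
}
#endif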