/*
 * Copyright (c) 2004-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Alex Hornung <alex@alexhornung.com>
 * by Robin J Carey
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* --- NOTES ---
 *
 * Note: The word "entropy" is often incorrectly used to describe
 * random data.  The word "entropy" originates from the science of
 * Physics.  The correct descriptive definition would be something
 * along the lines of "seed", "unpredictable numbers" or
 * "unpredictable data".
 *
 * Note: Some /dev/[u]random implementations save "seed" between
 * boots which represents a security hazard since an adversary
 * could acquire this data (since it is stored in a file).  If
 * the unpredictable data used in the above routines is only
 * generated during Kernel operation, then an adversary can only
 * acquire that data through a Kernel security compromise and/or
 * a cryptographic algorithm failure/cryptanalysis.
 *
 * Note: On FreeBSD-4.11, interrupts have to be manually enabled
 * using the rndcontrol(8) command.
 *
 * --- DESIGN (FreeBSD-4.11 based) ---
 *
 * The rnddev module automatically initializes itself the first time
 * it is used (client calls any public rnddev_*() interface routine).
 * Both CSPRNGs are initially seeded from the precise nano[up]time() routines.
 * Tests show this method produces good enough results, suitable for intended
 * use.  It is necessary for both CSPRNGs to be completely seeded, initially.
 *
 * After initialization and during Kernel operation the only suitable
 * unpredictable data available is:
 *
 *	(1) Keyboard scan-codes.
 *	(2) Nanouptime acquired by a Keyboard/Read-Event.
 *	(3) Suitable interrupt source; hard-disk/ATA-device.
 *
 *	(X) Mouse-event (xyz-data unsuitable); NOT IMPLEMENTED.
 *
 * This data is added to both CSPRNGs in real-time as it happens/
 * becomes-available.  Additionally, unpredictable (?) data may be
 * acquired from a true-random number generator if such a device is
 * available to the system (not advisable !).
 * Nanouptime() acquired by a Read-Event is a very important aspect of
 * this design, since it ensures that unpredictable data is added to
 * the CSPRNGs even if there are no other sources.
 * The nanouptime() Kernel routine is used since time relative to
 * boot is less adversary-known than time itself.
 *
 * This design has been thoroughly tested with debug logging
 * and the output from both /dev/random and /dev/urandom has
 * been tested with the DIEHARD test-suite; both pass.
 *
 * MODIFICATIONS MADE TO ORIGINAL "kern_random.c":
 *
 * 6th July 2005:
 *
 * o Changed ReadSeed() function to schedule future read-seed-events
 *   by at least one second.  Previous implementation used a randomised
 *   scheduling { 0, 1, 2, 3 seconds }.
 * o Changed SEED_NANOUP() function to use a "previous" accumulator
 *   algorithm similar to ReadSeed().  This ensures that there is no
 *   way that an adversary can tell what number is being added to the
 *   CSPRNGs, since the number added to the CSPRNGs at Event-Time is
 *   the sum of nanouptime()@Event and an unknown/secret number.
 * o Changed rnddev_add_interrupt() function to schedule future
 *   interrupt-events by at least one second.  Previous implementation
 *   had no scheduling algorithm which allowed an "interrupt storm"
 *   to occur resulting in skewed data entering into the CSPRNGs.
 *
 * 9th July 2005:
 *
 * o Some small cleanups and change all internal functions to be
 *   static/private.
 * o Removed ReadSeed() since its functionality is already performed
 *   by another function { rnddev_add_interrupt_OR_read() } and remove
 *   the silly rndByte accumulator/feedback-thing (since multiplying by
 *   rndByte could yield a value of 0).
 * o Made IBAA/L14 public interface become static/private;
 *   Local to this file (not changed to that in the original C modules).
 *
 * 16th July 2005:
 *
 * o SEED_NANOUP() -> NANOUP_EVENT() function rename.
 * o Make NANOUP_EVENT() handle the time-buffering directly so that all
 *   time-stamp-events use this single time-buffer (including keyboard).
 *   This removes dependency on "time_second" Kernel variable.
 * o Removed second-time-buffer code in rnddev_add_interrupt_OR_read (void).
 * o Rewrote the time-buffering algorithm in NANOUP_EVENT() to use a
 *   randomised time-delay range.
 *
 * 12th Dec 2005:
 *
 * o Updated to (hopefully final) L15 algorithm.
 *
 * 12th June 2006:
 *
 * o Added missing (u_char *) cast in RnddevRead() function.
 * o Changed copyright to 3-clause BSD license and cleaned up the layout
 *   of this file.
 *
 * For a proper changelog, refer to the version control history of this
 * file.
 */

#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/event.h>
#include <sys/random.h>
#include <sys/systimer.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/csprng.h>
#include <machine/atomic.h>
#include <machine/clock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

struct csprng_state csprng_state;

/*
 * Portability note: The u_char/unsigned char type is used where
 * uint8_t from <stdint.h> or u_int8_t from <sys/types.h> should really
 * be used.  On FreeBSD, it is safe to assume that these different types
 * are equivalent (on all architectures).
 * The FreeBSD <sys/crypto/rc4> module also makes this assumption.
 */

/*------------------------------ IBAA ----------------------------------*/

/*-------------------------- IBAA CSPRNG -------------------------------*/

/*
 * NOTE: The original source code from which this source code (IBAA)
 *       was taken has no copyright/license.  The algorithm has no patent
 *       and is freely/publicly available from:
 *
 *           http://www.burtleburtle.net/bob/rand/isaac.html
 */

/*
 * ^ means XOR, & means bitwise AND, a<<b means shift a by b.
 * barrel(a) shifts a 19 bits to the left, and bits wrap around
 * (the barrel() macro below rotates by 20 bits instead).
 * ind(x) is (x AND 255), or (x mod 256)
 */
typedef u_int32_t       u4;     /* unsigned four bytes, 32 bits */

#define ALPHA           (8)
#define SIZE            (1 << ALPHA)
#define MASK            (SIZE - 1)
#define ind(x)          ((x) & (SIZE - 1))
#define barrel(a)       (((a) << 20) ^ ((a) >> 12))     /* beta=32, shift=20 */

static void IBAA
(
        u4 *m,          /* Memory: array of SIZE ALPHA-bit terms */
        u4 *r,          /* Results: the sequence, same size as m */
        u4 *aa,         /* Accumulator: a single value */
        u4 *bb,         /* the previous result */
        u4 *counter     /* counter */
)
{
        u4 a, b, x, y, i;

        a = *aa;
        b = *bb + *counter;
        ++*counter;
        for (i = 0; i < SIZE; ++i) {
                x = m[i];
                a = barrel(a) + m[ind(i + (SIZE / 2))];         /* set a */
                m[i] = y = m[ind(x)] + a + b;                   /* set m */
                r[i] = b = m[ind(y >> ALPHA)] + x;              /* set r */
        }
        *bb = b; *aa = a;
}

/*-------------------------- IBAA CSPRNG -------------------------------*/


static u4 IBAA_memory[SIZE];
static u4 IBAA_results[SIZE];
static u4 IBAA_aa;
static u4 IBAA_bb;
static u4 IBAA_counter;

static volatile int IBAA_byte_index;


static void IBAA_Init(void);
static void IBAA_Call(void);
static void IBAA_Seed(const u_int32_t val);
static u_char IBAA_Byte(void);

/*
 * Initialize IBAA.
 */
static void
IBAA_Init(void)
{
        size_t i;

        for (i = 0; i < SIZE; ++i) {
                IBAA_memory[i] = i;
        }
        IBAA_aa = IBAA_bb = 0;
        IBAA_counter = 0;
        IBAA_byte_index = sizeof(IBAA_results); /* force IBAA_Call() */
}

/*
 * PRIVATE: Call IBAA to produce 256 32-bit u4 results.
 */
static void
IBAA_Call (void)
{
        IBAA(IBAA_memory, IBAA_results, &IBAA_aa, &IBAA_bb, &IBAA_counter);
        IBAA_byte_index = 0;
}

/*
 * Add a 32-bit u4 seed value into IBAAs memory.  Mix the low 4 bits
 * with 4 bits of PRNG data to reduce the possibility of a seeding-based
 * attack.
 */
static void
IBAA_Seed (const u_int32_t val)
{
        static int memIndex;
        u4 *iptr;

        iptr = &IBAA_memory[memIndex & MASK];
        *iptr = ((*iptr << 3) | (*iptr >> 29)) + (val ^ (IBAA_Byte() & 15));
        ++memIndex;
}

static void
IBAA_Vector (const char *buf, int bytes)
{
        int i;

        while (bytes >= sizeof(int)) {
                IBAA_Seed(*(const int *)buf);
                buf += sizeof(int);
                bytes -= sizeof(int);
        }

        /*
         * Warm up the generator to get rid of weak initial states.
         */
        for (i = 0; i < 10; ++i)
                IBAA_Call();
}

/*
 * Extract a byte from IBAAs 256 32-bit u4 results array.
 *
 * NOTE: This code is designed to prevent MP races from taking
 *       IBAA_byte_index out of bounds.
 */
static u_char
IBAA_Byte(void)
{
        u_char result;
        int index;

        index = IBAA_byte_index;
        if (index == sizeof(IBAA_results)) {
                IBAA_Call();
                index = 0;
        }
        result = ((u_char *)IBAA_results)[index];
        IBAA_byte_index = index + 1;
        return result;
}

/*------------------------------ IBAA ----------------------------------*/


/*------------------------------- L15 ----------------------------------*/


/*
 * IMPORTANT NOTE: LByteType must be exactly 8-bits in size or this software
 * will not function correctly.
 */
typedef unsigned char LByteType;

#define L15_STATE_SIZE  256

static LByteType L15_x, L15_y;
static LByteType L15_start_x;
static LByteType L15_state[L15_STATE_SIZE];

/*
 * PRIVATE FUNCS:
 */

static void L15_Swap(const LByteType pos1, const LByteType pos2);
static void L15_InitState(void);
static void L15_KSA(const LByteType * const key,
                    const size_t keyLen);
static void L15_Discard(const LByteType numCalls);

/*
 * PUBLIC INTERFACE:
 */
static void L15(const LByteType * const key, const size_t keyLen);
static LByteType L15_Byte(void);
static void L15_Vector(const LByteType * const key,
                       const size_t keyLen);

static __inline void
L15_Swap(const LByteType pos1, const LByteType pos2)
{
        const LByteType save1 = L15_state[pos1];

        L15_state[pos1] = L15_state[pos2];
        L15_state[pos2] = save1;
}

static void
L15_InitState (void)
{
        size_t i;
        for (i = 0; i < L15_STATE_SIZE; ++i)
                L15_state[i] = i;
}

#define L_SCHEDULE(xx)                                                  \
        for (i = 0; i < L15_STATE_SIZE; ++i) {                          \
                L15_Swap(i, (stateIndex += (L15_state[i] + (xx))));     \
        }

static void
L15_KSA (const LByteType * const key, const size_t keyLen)
{
        size_t i, keyIndex;
        static LByteType stateIndex = 0;

        for (keyIndex = 0; keyIndex < keyLen; ++keyIndex) {
                L_SCHEDULE(key[keyIndex]);
        }
        L_SCHEDULE(keyLen);
}

static void
L15_Discard(const LByteType numCalls)
{
        LByteType i;
        for (i = 0; i < numCalls; ++i) {
                (void)L15_Byte();
        }
}


/*
 * PUBLIC INTERFACE:
 */
static void
L15(const LByteType * const key, const size_t keyLen)
{
        L15_x = L15_start_x = 0;
        L15_y = L15_STATE_SIZE - 1;
        L15_InitState();
        L15_KSA(key, keyLen);
        L15_Discard(L15_Byte());
}

static LByteType
L15_Byte(void)
{
        LByteType z;

        L15_Swap(L15_state[L15_x], L15_y);
        z = (L15_state [L15_x++] + L15_state[L15_y--]);
        if (L15_x == L15_start_x) {
                --L15_y;
        }
        return (L15_state[z]);
}

static void
L15_Vector (const LByteType * const key, const size_t keyLen)
{
        L15_KSA(key, keyLen);
}


/*------------------------------- L15 ----------------------------------*/

/************************************************************************
 *                          KERNEL INTERFACE                            *
 ************************************************************************
 *
 * By Robin J Carey, Matthew Dillon and Alex Hornung.
 */

static int rand_thread_value;
static void NANOUP_EVENT(void);
static thread_t rand_td;
static struct spinlock rand_spin;

static int sysctl_kern_random(SYSCTL_HANDLER_ARGS);

static int nrandevents;
static int rand_mode = 2;
static struct systimer systimer_rand;

static int sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS);

SYSCTL_INT(_kern, OID_AUTO, nrandevents, CTLFLAG_RD, &nrandevents, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, random, CTLFLAG_RD | CTLFLAG_ANYBODY, 0, 0,
        sysctl_kern_random, "I", "Acquire random data");
SYSCTL_PROC(_kern, OID_AUTO, rand_mode, CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
        sysctl_kern_rand_mode, "A", "RNG mode (csprng, ibaa or mixed)");


/*
 * Called from early boot
 */
void
rand_initialize(void)
{
        struct timespec now;
        int i;

        csprng_init(&csprng_state);
#if 0
        /*
         * XXX: we do the reseeding when someone uses the RNG instead
         * of regularly using init_reseed (which initializes a callout)
         * to avoid unnecessary and regular reseeding.
         */
        csprng_init_reseed(&csprng_state);
#endif

        spin_init(&rand_spin, "randinit");

        /* Initialize IBAA. */
        IBAA_Init();

        /* Initialize L15. */
        nanouptime(&now);
        L15((const LByteType *)&now.tv_nsec, sizeof(now.tv_nsec));
        for (i = 0; i < (SIZE / 2); ++i) {
                nanotime(&now);
                add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
                    sizeof(now.tv_nsec), RAND_SRC_TIMING);
                nanouptime(&now);
                add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
                    sizeof(now.tv_nsec), RAND_SRC_TIMING);
        }

        /*
         * Warm up the generator to get rid of weak initial states.
         */
        for (i = 0; i < 10; ++i)
                IBAA_Call();
}

/*
 * Keyboard events
 */
void
add_keyboard_randomness(u_char scancode)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *) &scancode, sizeof (scancode));
        spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
}

/*
 * Interrupt events.  This is SMP safe and allowed to race.
 *
 * This adjusts rand_thread_value which will be incorporated into the next
 * time-buffered seed.  It does not affect the seeding period per se.
 */
void
add_interrupt_randomness(int intr)
{
        if (tsc_present) {
                rand_thread_value = (rand_thread_value << 4) ^ 1 ^
                        ((int)rdtsc() % 151);
        }
        ++rand_thread_value;            /* ~1 bit */
}

/*
 * True random number source
 */
int
add_buffer_randomness(const char *buf, int bytes)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *)buf, bytes);
        IBAA_Vector(buf, bytes);
        spin_unlock(&rand_spin);

        atomic_add_int(&nrandevents, 1);

        csprng_add_entropy(&csprng_state, RAND_SRC_UNKNOWN,
            (const uint8_t *)buf, bytes, 0);

        return 0;
}


int
add_buffer_randomness_src(const char *buf, int bytes, int srcid)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *)buf, bytes);
        IBAA_Vector(buf, bytes);
        spin_unlock(&rand_spin);

        atomic_add_int(&nrandevents, 1);

        csprng_add_entropy(&csprng_state, srcid & 0xff,
            (const uint8_t *)buf, bytes, 0);

        return 0;
}
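
#if 0
/*
 * Illustrative sketch only (not compiled): a hypothetical driver interrupt
 * handler feeding device timing into the entropy pools through
 * add_buffer_randomness_src().  The function name is made up; the call
 * pattern mirrors the RAND_SRC_TIMING usage in rand_initialize() above.
 */
static void
example_driver_intr(void *arg)
{
        struct timespec ts;

        nanouptime(&ts);
        add_buffer_randomness_src((const uint8_t *)&ts.tv_nsec,
            sizeof(ts.tv_nsec), RAND_SRC_TIMING);
}
#endif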

/*
 * Kqueue filter (always succeeds)
 */
int
random_filter_read(struct knote *kn, long hint)
{
        return (1);
}

/*
 * Heavy weight random number generator.  May return less than the
 * requested number of bytes.
 *
 * Instead of stopping early,
 */
u_int
read_random(void *buf, u_int nbytes)
{
        int i, j;

        if (rand_mode == 0) {
                /* Only use CSPRNG */
                i = csprng_get_random(&csprng_state, buf, nbytes, 0);
        } else if (rand_mode == 1) {
                /* Only use IBAA */
                spin_lock(&rand_spin);
                for (i = 0; i < nbytes; i++)
                        ((u_char *)buf)[i] = IBAA_Byte();
                spin_unlock(&rand_spin);
        } else {
                /* Mix both CSPRNG and IBAA */
                i = csprng_get_random(&csprng_state, buf, nbytes, 0);
                spin_lock(&rand_spin);
                for (j = 0; j < i; j++)
                        ((u_char *)buf)[j] ^= IBAA_Byte();
                spin_unlock(&rand_spin);
        }

        add_interrupt_randomness(0);
        return (i > 0) ? i : 0;
}

/*
 * Heavy weight random number generator.  Must return the requested
 * number of bytes.
 */
u_int
read_random_unlimited(void *buf, u_int nbytes)
{
        u_int i;

        spin_lock(&rand_spin);
        for (i = 0; i < nbytes; ++i)
                ((u_char *)buf)[i] = IBAA_Byte();
        spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
        return (i);
}
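
#if 0
/*
 * Illustrative sketch only (not compiled): an in-kernel consumer filling a
 * buffer.  read_random() may return fewer bytes than requested (e.g. on the
 * CSPRNG path), while read_random_unlimited() always fills the buffer; the
 * helper name below is hypothetical.
 */
static void
example_fill_key(uint8_t *key, u_int len)
{
        u_int n;

        n = read_random(key, len);
        if (n < len)
                read_random_unlimited(key + n, len - n);
}
#endif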

/*
 * Read random data via sysctl().
 */
static
int
sysctl_kern_random(SYSCTL_HANDLER_ARGS)
{
        char buf[64];
        size_t n;
        size_t r;
        int error = 0;

        n = req->oldlen;
        if (n > 1024 * 1024)
                n = 1024 * 1024;
        while (n > 0) {
                if ((r = n) > sizeof(buf))
                        r = sizeof(buf);
                read_random_unlimited(buf, r);
                error = SYSCTL_OUT(req, buf, r);
                if (error)
                        break;
                n -= r;
        }
        return(error);
}

/*
 * Change the random mode via sysctl().
 */
static
const char *
rand_mode_to_str(int mode)
{
        switch (mode) {
        case 0:
                return "csprng";
        case 1:
                return "ibaa";
        case 2:
                return "mixed";
        default:
                return "unknown";
        }
}

static
int
sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS)
{
        char mode[32];
        int error;

        strncpy(mode, rand_mode_to_str(rand_mode), sizeof(mode)-1);
        error = sysctl_handle_string(oidp, mode, sizeof(mode), req);
        if (error || req->newptr == NULL)
                return error;

        if ((strncmp(mode, "csprng", sizeof(mode))) == 0)
                rand_mode = 0;
        else if ((strncmp(mode, "ibaa", sizeof(mode))) == 0)
                rand_mode = 1;
        else if ((strncmp(mode, "mixed", sizeof(mode))) == 0)
                rand_mode = 2;
        else
                error = EINVAL;

        return error;
}
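
#if 0
/*
 * Illustrative sketch only (userland code, not compiled here): pulling
 * entropy through the kern.random sysctl and switching the generator mode
 * via kern.rand_mode using sysctlbyname(3).  The function name is made up.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

static int
example_userland_usage(void)
{
        unsigned char buf[64];
        size_t len = sizeof(buf);

        if (sysctlbyname("kern.random", buf, &len, NULL, 0) != 0)
                return (-1);
        return (sysctlbyname("kern.rand_mode", NULL, NULL,
            "csprng", strlen("csprng") + 1));
}
#endif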

/*
 * Random number generator helper thread.  This limits code overhead from
 * high frequency events by delaying the clearing of rand_thread_value.
 *
 * This is a time-buffered loop, with a randomizing delay.  Note that
 * interrupt entropy does not cause the thread to wake up any faster, but
 * it does improve the quality of the entropy produced.
 */
static
void
rand_thread_loop(void *dummy)
{
        int64_t count;

        for (;;) {
                /*
                 * Generate entropy.
                 */
                NANOUP_EVENT();
                spin_lock(&rand_spin);
                count = (uint8_t)L15_Byte();
                spin_unlock(&rand_spin);

                /*
                 * Calculate 1/10 of a second to 2/10 of a second,
                 * fine-grained using an L15_Byte() feedback.
                 *
                 * Go faster in the first 120 seconds after boot.  This
                 * affects the time-after-next interrupt (pipeline delay).
                 */
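                /*
                 * With sys_cputimer->freq in ticks per second and the L15
                 * byte in the range 0-255, the period below works out to
                 * freq * (256..511) / 2560 ticks, i.e. roughly 0.1 to 0.2
                 * seconds (cut to roughly 0.01 to 0.02 seconds during the
                 * shortened window just after boot).
                 */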
                count = sys_cputimer->freq * (count + 256) / (256 * 10);
                if (time_uptime < 120)
                        count = count / 10 + 1;
                systimer_rand.periodic = count;

                tsleep(rand_td, 0, "rwait", 0);
        }
}

/*
 * Systimer trigger - fine-grained random trigger
 */
static
void
rand_thread_wakeup(struct systimer *timer, int in_ipi, struct intrframe *frame)
{
        wakeup(rand_td);
}

static
void
rand_thread_init(void)
{
        systimer_init_periodic_nq(&systimer_rand, rand_thread_wakeup, NULL, 25);
        lwkt_create(rand_thread_loop, NULL, &rand_td, NULL, 0, 0, "random");
}

SYSINIT(rand, SI_SUB_HELPER_THREADS, SI_ORDER_ANY, rand_thread_init, 0);

/*
 * Caller is time-buffered.  Incorporate any accumulated interrupt randomness
 * as well as the high frequency bits of the TSC.
 *
 * A delta nanoseconds value is used to remove absolute time from the
 * generated entropy.  Even though we are pushing 32 bits, this entropy is
 * probably only good for one or two bits without any interrupt sources,
 * and possibly 8 bits with.
 */
static void
NANOUP_EVENT(void)
{
        static struct timespec last;
        struct timespec now;
        int nsec;

        /*
         * Delta nanoseconds since last event
         */
        nanouptime(&now);
        nsec = now.tv_nsec - last.tv_nsec;
        last = now;

        /*
         * Interrupt randomness.
         */
        nsec ^= rand_thread_value;

        /*
         * The TSC, if present, generally has an even higher
         * resolution.  Integrate a portion of it into our seed.
         */
        if (tsc_present)
                nsec ^= (rdtsc() & 255) << 8;

        /*
         * Ok.
         */
        add_buffer_randomness_src((const uint8_t *)&nsec, sizeof(nsec),
            RAND_SRC_INTR);
}