/*
 * Copyright (c) 2004-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * by Alex Hornung <alex@alexhornung.com>
 * by Robin J Carey
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*                              --- NOTES ---
 *
 * Note: The word "entropy" is often incorrectly used to describe
 *       random data.  The word "entropy" originates from the science
 *       of Physics.  The correct descriptive definition would be
 *       something along the lines of "seed", "unpredictable numbers"
 *       or "unpredictable data".
 *
 * Note: Some /dev/[u]random implementations save "seed" between boots,
 *       which represents a security hazard since an adversary could
 *       acquire this data (it is stored in a file).  If the
 *       unpredictable data used in the above routines is only
 *       generated during Kernel operation, then an adversary can only
 *       acquire that data through a Kernel security compromise and/or
 *       a cryptographic algorithm failure/cryptanalysis.
 *
 * Note: On FreeBSD-4.11, interrupts have to be manually enabled
 *       using the rndcontrol(8) command.
 *
 *                      --- DESIGN (FreeBSD-4.11 based) ---
 *
 * The rnddev module automatically initializes itself the first time
 * it is used (client calls any public rnddev_*() interface routine).
 * Both CSPRNGs are initially seeded from the precise nano[up]time()
 * routines.  Tests show this method produces good enough results,
 * suitable for intended use.  It is necessary for both CSPRNGs to be
 * completely seeded, initially.
 *
 * After initialization and during Kernel operation the only suitable
 * unpredictable data available is:
 *
 *      (1) Keyboard scan-codes.
 *      (2) Nanouptime acquired by a Keyboard/Read-Event.
 *      (3) Suitable interrupt source; hard-disk/ATA-device.
 *
 *      (X) Mouse-event (xyz-data unsuitable); NOT IMPLEMENTED.
 *
 * This data is added to both CSPRNGs in real-time as it happens/
 * becomes-available.  Additionally, unpredictable (?) data may be
 * acquired from a true-random number generator if such a device is
 * available to the system (not advisable !).
 * Nanouptime() acquired by a Read-Event is a very important aspect of
 * this design, since it ensures that unpredictable data is added to
 * the CSPRNGs even if there are no other sources.
 * The nanouptime() Kernel routine is used since time relative to
 * boot is less adversary-known than time itself.
 *
 * This design has been thoroughly tested with debug logging
 * and the output from both /dev/random and /dev/urandom has
 * been tested with the DIEHARD test-suite; both pass.
 *
 * MODIFICATIONS MADE TO ORIGINAL "kern_random.c":
 *
 * 6th July 2005:
 *
 * o Changed ReadSeed() function to schedule future read-seed-events
 *   by at least one second.  Previous implementation used a randomised
 *   scheduling { 0, 1, 2, 3 seconds }.
 * o Changed SEED_NANOUP() function to use a "previous" accumulator
 *   algorithm similar to ReadSeed().  This ensures that there is no
 *   way that an adversary can tell what number is being added to the
 *   CSPRNGs, since the number added to the CSPRNGs at Event-Time is
 *   the sum of nanouptime()@Event and an unknown/secret number.
 * o Changed rnddev_add_interrupt() function to schedule future
 *   interrupt-events by at least one second.  Previous implementation
 *   had no scheduling algorithm, which allowed an "interrupt storm"
 *   to occur resulting in skewed data entering into the CSPRNGs.
 *
 * 9th July 2005:
 *
 * o Some small cleanups and change all internal functions to be
 *   static/private.
 * o Removed ReadSeed() since its functionality is already performed
 *   by another function { rnddev_add_interrupt_OR_read() } and removed
 *   the silly rndByte accumulator/feedback-thing (since multiplying by
 *   rndByte could yield a value of 0).
 * o Made IBAA/L14 public interface become static/private;
 *   Local to this file (not changed to that in the original C modules).
 *
 * 16th July 2005:
 *
 * o SEED_NANOUP() -> NANOUP_EVENT() function rename.
 * o Make NANOUP_EVENT() handle the time-buffering directly so that all
 *   time-stamp-events use this single time-buffer (including keyboard).
 *   This removes dependency on the "time_second" Kernel variable.
 * o Removed second-time-buffer code in rnddev_add_interrupt_OR_read(void).
 * o Rewrote the time-buffering algorithm in NANOUP_EVENT() to use a
 *   randomised time-delay range.
 *
 * 12th Dec 2005:
 *
 * o Updated to (hopefully final) L15 algorithm.
 *
 * 12th June 2006:
 *
 * o Added missing (u_char *) cast in RnddevRead() function.
 * o Changed copyright to 3-clause BSD license and cleaned up the layout
 *   of this file.
 *
 * For a proper changelog, refer to the version control history of this
 * file.
 */
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/poll.h>
#include <sys/event.h>
#include <sys/random.h>
#include <sys/systimer.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>
#include <sys/csprng.h>
#include <machine/atomic.h>
#include <machine/clock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
struct csprng_state csprng_state;

/*
 * Portability note: The u_char/unsigned char type is used where
 * uint8_t from <stdint.h> or u_int8_t from <sys/types.h> should really
 * be used.  On FreeBSD it is safe to assume that these different types
 * are equivalent (on all architectures).
 * The FreeBSD <sys/crypto/rc4> module also makes this assumption.
 */
/*------------------------------ IBAA ----------------------------------*/

/*-------------------------- IBAA CSPRNG -------------------------------*/

/*
 * NOTE: The original source code from which this source code (IBAA)
 *       was taken has no copyright/license.  The algorithm has no patent
 *       and is freely/publicly available from:
 *
 *           http://www.burtleburtle.net/bob/rand/isaac.html
 */

/*
 * ^ means XOR, & means bitwise AND, a<<b means shift a by b.
 * barrel(a) rotates a to the left with wrap-around (19 bits in the
 * original description; this implementation rotates by 20, see barrel()
 * below).
 * ind(x) is (x AND 255), or (x mod 256).
 */
typedef u_int32_t       u4;     /* unsigned four bytes, 32 bits */

#define ALPHA           (8)
#define SIZE            (1 << ALPHA)
#define MASK            (SIZE - 1)
#define ind(x)          ((x) & (SIZE - 1))
#define barrel(a)       (((a) << 20) ^ ((a) >> 12))  /* beta=32, shift=20 */
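
/*
 * Illustrative note (not from the original sources): barrel() is a
 * 32-bit rotate-left by 20 bits and ind() masks an index into the
 * 256-entry state array, e.g.:
 *
 *      barrel(0x00000001) == 0x00100000
 *      ind(0x12345678)    == 0x78
 */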
static void IBAA
(
        u4 *m,          /* Memory: array of SIZE ALPHA-bit terms */
        u4 *r,          /* Results: the sequence, same size as m */
        u4 *aa,         /* Accumulator: a single value */
        u4 *bb,         /* the previous result */
        u4 *counter     /* counter */
)
{
        u4 a, b, x, y, i;

        a = *aa;
        b = *bb + *counter;
        ++*counter;
        for (i = 0; i < SIZE; ++i) {
                x = m[i];
                a = barrel(a) + m[ind(i + (SIZE / 2))];  /* set a */
                m[i] = y = m[ind(x)] + a + b;            /* set m */
                r[i] = b = m[ind(y >> ALPHA)] + x;       /* set r */
        }
        *bb = b; *aa = a;
}
/*-------------------------- IBAA CSPRNG -------------------------------*/

static u4       IBAA_memory[SIZE];
static u4       IBAA_results[SIZE];
static u4       IBAA_aa;
static u4       IBAA_bb;
static u4       IBAA_counter;

static volatile int IBAA_byte_index;

static void     IBAA_Init(void);
static void     IBAA_Call(void);
static void     IBAA_Seed(const u_int32_t val);
static u_char   IBAA_Byte(void);
/*
 * Initialize IBAA.
 */
static void
IBAA_Init(void)
{
        size_t i;

        for (i = 0; i < SIZE; ++i) {
                IBAA_memory[i] = i;
        }
        IBAA_aa = IBAA_bb = 0;
        IBAA_counter = 0;
        IBAA_byte_index = sizeof(IBAA_results);  /* force IBAA_Call() */
}
/*
 * PRIVATE: Call IBAA to produce 256 32-bit u4 results.
 */
static void
IBAA_Call(void)
{
        IBAA(IBAA_memory, IBAA_results, &IBAA_aa, &IBAA_bb, &IBAA_counter);
        IBAA_byte_index = 0;
}
/*
 * Add a 32-bit u4 seed value into IBAA's memory.  Mix the low 4 bits
 * with 4 bits of PRNG data to reduce the possibility of a seeding-based
 * attack.
 */
static void
IBAA_Seed(const u_int32_t val)
{
        static int memIndex;
        u4 *iptr;

        iptr = &IBAA_memory[memIndex & MASK];
        *iptr = ((*iptr << 3) | (*iptr >> 29)) + (val ^ (IBAA_Byte() & 15));
        ++memIndex;
}
static void
IBAA_Vector(const char *buf, int bytes)
{
        int i;

        while (bytes >= sizeof(int)) {
                IBAA_Seed(*(const int *)buf);
                buf += sizeof(int);
                bytes -= sizeof(int);
        }

        /*
         * Warm up the generator to get rid of weak initial states.
         */
        for (i = 0; i < 10; ++i)
                IBAA_Call();
}
/*
 * Extract a byte from IBAA's 256 32-bit u4 results array.
 *
 * NOTE: This code is designed to prevent MP races from taking
 *       IBAA_byte_index out of bounds.
 */
static u_char
IBAA_Byte(void)
{
        u_char result;
        int index;

        index = IBAA_byte_index;
        if (index == sizeof(IBAA_results)) {
                IBAA_Call();
                index = 0;
        }
        result = ((u_char *)IBAA_results)[index];
        IBAA_byte_index = index + 1;
        return result;
}
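
/*
 * Illustrative note (not from the original sources): each IBAA_Call()
 * produces sizeof(IBAA_results) = SIZE * sizeof(u4) = 256 * 4 = 1024
 * output bytes, which IBAA_Byte() hands out one at a time before the
 * core IBAA() round is run again.
 */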
/*------------------------------ IBAA ----------------------------------*/


/*------------------------------- L15 ----------------------------------*/

/*
 * IMPORTANT NOTE: LByteType must be exactly 8 bits in size or this software
 * will not function correctly.
 */
typedef unsigned char   LByteType;

#define L15_STATE_SIZE  256

static LByteType        L15_x, L15_y;
static LByteType        L15_start_x;
static LByteType        L15_state[L15_STATE_SIZE];
/*
 * PRIVATE FUNCS:
 */
static void             L15_Swap(const LByteType pos1, const LByteType pos2);
static void             L15_InitState(void);
static void             L15_KSA(const LByteType * const key,
                            const size_t keyLen);
static void             L15_Discard(const LByteType numCalls);

/*
 * PUBLIC INTERFACE:
 */
static void             L15(const LByteType * const key, const size_t keyLen);
static LByteType        L15_Byte(void);
static void             L15_Vector(const LByteType * const key,
                            const size_t keyLen);
static __inline void
L15_Swap(const LByteType pos1, const LByteType pos2)
{
        const LByteType save1 = L15_state[pos1];

        L15_state[pos1] = L15_state[pos2];
        L15_state[pos2] = save1;
}

static void
L15_InitState(void)
{
        size_t i;

        for (i = 0; i < L15_STATE_SIZE; ++i)
                L15_state[i] = i;
}

#define L_SCHEDULE(xx)                                                  \
        for (i = 0; i < L15_STATE_SIZE; ++i) {                          \
                L15_Swap(i, (stateIndex += (L15_state[i] + (xx))));     \
        }
static void
L15_KSA(const LByteType * const key, const size_t keyLen)
{
        size_t i, keyIndex;
        static LByteType stateIndex = 0;

        for (keyIndex = 0; keyIndex < keyLen; ++keyIndex) {
                L_SCHEDULE(key[keyIndex]);
        }
        L_SCHEDULE(keyLen);
}

static void
L15_Discard(const LByteType numCalls)
{
        LByteType i;

        for (i = 0; i < numCalls; ++i) {
                (void)L15_Byte();
        }
}
/*
 * PUBLIC INTERFACE:
 */
static void
L15(const LByteType * const key, const size_t keyLen)
{
        L15_x = L15_start_x = 0;
        L15_y = L15_STATE_SIZE - 1;
        L15_InitState();
        L15_KSA(key, keyLen);
        L15_Discard(L15_Byte());
}

static LByteType
L15_Byte(void)
{
        LByteType z;

        L15_Swap(L15_state[L15_x], L15_y);
        z = (L15_state[L15_x++] + L15_state[L15_y--]);
        if (L15_x == L15_start_x) {
                --L15_y;
        }
        return (L15_state[z]);
}

static void
L15_Vector(const LByteType * const key, const size_t keyLen)
{
        L15_KSA(key, keyLen);
}
/*------------------------------- L15 ----------------------------------*/

/************************************************************************
 *                          KERNEL INTERFACE                            *
 ************************************************************************
 *
 * By Robin J Carey, Matthew Dillon and Alex Hornung.
 */

static int              rand_thread_signal = 1;
static void             NANOUP_EVENT(void);
static thread_t         rand_td;
static struct spinlock  rand_spin;

static int              sysctl_kern_random(SYSCTL_HANDLER_ARGS);

static int              nrandevents;
static int              rand_mode = 2;

static int              sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS);

SYSCTL_INT(_kern, OID_AUTO, nrandevents, CTLFLAG_RD, &nrandevents, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, random, CTLFLAG_RD | CTLFLAG_ANYBODY, 0, 0,
    sysctl_kern_random, "I", "Acquire random data");
SYSCTL_PROC(_kern, OID_AUTO, rand_mode, CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
    sysctl_kern_rand_mode, "A", "RNG mode (csprng, ibaa or mixed)");
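
/*
 * Illustrative userland sketch (not part of the original file): the
 * sysctl nodes declared above can be read with sysctl(8), e.g.
 * "sysctl kern.nrandevents", and kern.random can be read
 * programmatically with sysctlbyname(3) along these lines:
 *
 *      unsigned char rbuf[16];
 *      size_t rlen = sizeof(rbuf);
 *
 *      if (sysctlbyname("kern.random", rbuf, &rlen, NULL, 0) == 0) {
 *              ... rlen bytes of random data are now in rbuf ...
 *      }
 */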
/*
 * Called from early boot
 */
void
rand_initialize(void)
{
        struct timespec now;
        int i;

        csprng_init(&csprng_state);
#if 0
        /*
         * XXX: we do the reseeding when someone uses the RNG instead
         * of regularly using init_reseed (which initializes a callout)
         * to avoid unnecessary and regular reseeding.
         */
        csprng_init_reseed(&csprng_state);
#endif

        spin_init(&rand_spin, "randinit");

        /* Initialize IBAA. */
        IBAA_Init();

        /* Initialize L15. */
        nanouptime(&now);
        L15((const LByteType *)&now.tv_nsec, sizeof(now.tv_nsec));
        for (i = 0; i < (SIZE / 2); ++i) {
                nanotime(&now);
                add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
                    sizeof(now.tv_nsec), RAND_SRC_TIMING);
                nanouptime(&now);
                add_buffer_randomness_src((const uint8_t *)&now.tv_nsec,
                    sizeof(now.tv_nsec), RAND_SRC_TIMING);
        }

        /*
         * Warm up the generator to get rid of weak initial states.
         */
        for (i = 0; i < 10; ++i)
                IBAA_Call();
}
/*
 * Keyboard events
 */
void
add_keyboard_randomness(u_char scancode)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *)&scancode, sizeof(scancode));
        spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
}

/*
 * Interrupt events.  This is SMP safe and allowed to race.
 */
void
add_interrupt_randomness(int intr)
{
        if (rand_thread_signal == 0) {
                rand_thread_signal = 1;
                lwkt_schedule(rand_td);
        }
}
/*
 * True random number source
 */
int
add_buffer_randomness(const char *buf, int bytes)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *)buf, bytes);
        IBAA_Vector(buf, bytes);
        spin_unlock(&rand_spin);

        atomic_add_int(&nrandevents, 1);

        csprng_add_entropy(&csprng_state, RAND_SRC_UNKNOWN,
            (const uint8_t *)buf, bytes, 0);

        return 0;
}

int
add_buffer_randomness_src(const char *buf, int bytes, int srcid)
{
        spin_lock(&rand_spin);
        L15_Vector((const LByteType *)buf, bytes);
        IBAA_Vector(buf, bytes);
        spin_unlock(&rand_spin);

        atomic_add_int(&nrandevents, 1);

        csprng_add_entropy(&csprng_state, srcid & 0xff,
            (const uint8_t *)buf, bytes, 0);

        return 0;
}
/*
 * Kqueue filter (always succeeds)
 */
int
random_filter_read(struct knote *kn, long hint)
{
        return (1);
}

/*
 * Heavy weight random number generator.  May return less than the
 * requested number of bytes.
 *
 * Instead of stopping early,
 */
u_int
read_random(void *buf, u_int nbytes)
{
        int i, j;

        if (rand_mode == 0) {
                /* Only use CSPRNG */
                i = csprng_get_random(&csprng_state, buf, nbytes, 0);
        } else if (rand_mode == 1) {
                /* Only use IBAA */
                spin_lock(&rand_spin);
                for (i = 0; i < nbytes; i++)
                        ((u_char *)buf)[i] = IBAA_Byte();
                spin_unlock(&rand_spin);
        } else {
                /* Mix both CSPRNG and IBAA */
                i = csprng_get_random(&csprng_state, buf, nbytes, 0);
                spin_lock(&rand_spin);
                for (j = 0; j < i; j++)
                        ((u_char *)buf)[j] ^= IBAA_Byte();
                spin_unlock(&rand_spin);
        }

        add_interrupt_randomness(0);
        return (i > 0) ? i : 0;
}
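
/*
 * Illustrative note (not from the original sources): in the mixed mode
 * above, every byte the CSPRNG returns is XORed with an IBAA output
 * byte; the usual argument is that, assuming the two streams are
 * independent, the XOR is at least as hard to predict as the stronger
 * of the two generators.
 */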
/*
 * Heavy weight random number generator.  Must return the requested
 * number of bytes.
 */
u_int
read_random_unlimited(void *buf, u_int nbytes)
{
        u_int i;

        spin_lock(&rand_spin);
        for (i = 0; i < nbytes; ++i)
                ((u_char *)buf)[i] = IBAA_Byte();
        spin_unlock(&rand_spin);
        add_interrupt_randomness(0);
        return (i);
}
/*
 * Read random data via sysctl().
 */
static
int
sysctl_kern_random(SYSCTL_HANDLER_ARGS)
{
        char buf[64];
        size_t n;
        size_t r;
        int error = 0;

        n = req->oldlen;
        if (n > 1024 * 1024)
                n = 1024 * 1024;
        while (n > 0) {
                if ((r = n) > sizeof(buf))
                        r = sizeof(buf);
                read_random_unlimited(buf, r);
                error = SYSCTL_OUT(req, buf, r);
                if (error)
                        break;
                n -= r;
        }
        return(error);
}
/*
 * Change the random mode via sysctl().
 */
static
const char *
rand_mode_to_str(int mode)
{
        switch (mode) {
        case 0:
                return "csprng";
        case 1:
                return "ibaa";
        case 2:
                return "mixed";
        default:
                return "unknown";
        }
}
static
int
sysctl_kern_rand_mode(SYSCTL_HANDLER_ARGS)
{
        char mode[32];
        int error;

        strncpy(mode, rand_mode_to_str(rand_mode), sizeof(mode) - 1);
        error = sysctl_handle_string(oidp, mode, sizeof(mode), req);
        if (error || req->newptr == NULL)
                return error;

        if ((strncmp(mode, "csprng", sizeof(mode))) == 0)
                rand_mode = 0;
        else if ((strncmp(mode, "ibaa", sizeof(mode))) == 0)
                rand_mode = 1;
        else if ((strncmp(mode, "mixed", sizeof(mode))) == 0)
                rand_mode = 2;
        else
                error = EINVAL;

        return error;
}
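
/*
 * Illustrative example (not part of the original file): the mode can be
 * inspected or changed from userland with sysctl(8), e.g.
 * "sysctl kern.rand_mode" or "sysctl kern.rand_mode=csprng"; a string
 * other than "csprng", "ibaa" or "mixed" is rejected with EINVAL by the
 * handler above.
 */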
/*
 * Random number generator helper thread.  This limits code overhead from
 * high frequency events by delaying the clearing of rand_thread_signal.
 *
 * MPSAFE thread
 */
static
void
rand_thread_loop(void *dummy)
{
        int count;

        for (;;) {
                NANOUP_EVENT();
                spin_lock(&rand_spin);
                count = (int)(L15_Byte() * hz / (256 * 10) + hz / 10 + 1);
                spin_unlock(&rand_spin);
                tsleep(rand_td, 0, "rwait", count);
                crit_enter();
                lwkt_deschedule_self(rand_td);
                cpu_sfence();
                rand_thread_signal = 0;
                crit_exit();
                lwkt_switch();
        }
}

static
void
rand_thread_init(void)
{
        lwkt_create(rand_thread_loop, NULL, &rand_td, NULL, 0, 0, "random");
}

SYSINIT(rand, SI_SUB_HELPER_THREADS, SI_ORDER_ANY, rand_thread_init, 0);
/*
 * Time-buffered event time-stamping.  This is necessary to cut off higher
 * event frequencies, e.g. an interrupt occurring at 25Hz.  In such cases
 * the CPU is being chewed and the timestamps are skewed (minimal variation).
 * Use a nano-second time-delay to limit how many times an Event can occur
 * in one second; <= 5Hz.  Note that this doesn't prevent time-stamp skewing.
 * This implementation randomises the time-delay between events, which adds
 * a layer of security/unpredictability with regard to read-events (a user
 * controlled input).
 *
 * Note: now.tv_nsec should range [ 0 - 1,000,000,000 ].
 * Note: "ACCUM" is a security measure (result = capped-unknown + unknown),
 *       and also produces an uncapped (>=32-bit) value.
 */
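
/*
 * Worked example (illustrative, not from the original sources): with
 * timeDelay = 200e6 + ((ACCUM.tv_nsec % 151) * 1e6) ns as computed
 * below, the delay between accepted events ranges from 200 ms
 * (ACCUM % 151 == 0, 5 Hz) to 350 ms (ACCUM % 151 == 150, ~2.86 Hz),
 * which is where the "<= 5Hz" figure above comes from.
 */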
static void
NANOUP_EVENT(void)
{
        static struct timespec ACCUM = { 0, 0 };
        static struct timespec NEXT  = { 0, 0 };
        struct timespec now;

        nanouptime(&now);
        spin_lock(&rand_spin);
        if ((now.tv_nsec > NEXT.tv_nsec) || (now.tv_sec != NEXT.tv_sec)) {
                /*
                 * Randomised time-delay: 200e6 - 350e6 ns; 5 - 2.86 Hz.
                 */
                unsigned long one_mil;
                unsigned long timeDelay;

                one_mil = 1000000UL;    /* 0.001 s */
                timeDelay = (one_mil * 200) +
                    (((unsigned long)ACCUM.tv_nsec % 151) * one_mil);
                NEXT.tv_nsec = now.tv_nsec + timeDelay;
                NEXT.tv_sec = now.tv_sec;
                ACCUM.tv_nsec += now.tv_nsec;

                /*
                 * The TSC, if present, generally has an even higher
                 * resolution.  Integrate a portion of it into our seed.
                 */
                if (tsc_present)
                        ACCUM.tv_nsec ^= rdtsc() & 255;

                spin_unlock(&rand_spin);
                add_buffer_randomness_src((const uint8_t *)&ACCUM.tv_nsec,
                    sizeof(ACCUM.tv_nsec), RAND_SRC_INTR);
                spin_lock(&rand_spin);
        }
        spin_unlock(&rand_spin);
}