/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 */
/*
 * This file implements the interfaces that the /dev/random
 * driver uses for read(2), write(2) and poll(2) on /dev/random or
 * /dev/urandom. It also implements the kernel API - random_add_entropy(),
 * random_add_pseudo_entropy(), random_get_pseudo_bytes()
 * and random_get_bytes().
 *
 * We periodically collect random bits from providers which are registered
 * with the Kernel Cryptographic Framework (kCF) as capable of random
 * number generation. The random bits are maintained in a cache that is
 * used to satisfy requests for high-quality random numbers (/dev/random).
 * If the cache does not have enough bytes to satisfy a request, we pick
 * a provider and call its SPI routine.
 *
 * /dev/urandom requests use a software-based generator algorithm that uses the
 * random bits in the cache as a seed. We create one pseudo-random generator
 * (for /dev/urandom) per possible CPU on the system, and use it,
 * kmem-magazine-style, to avoid cache line contention.
 *
 * LOCKING HIERARCHY:
 *	1) rmp->rm_mag.rm_lock protects the per-cpu pseudo-random generators.
 *	2) rndpool_lock protects the high-quality randomness pool.
 *		It may be locked while a rmp->rm_mag.rm_lock is held.
 *
 * A history note: The kernel API and the software-based algorithms in this
 * file used to be part of the /dev/random driver.
 */
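
/*
 * An illustrative sketch (not actual driver code) of the lock ordering
 * stated above; a per-magazine rm_lock may be held when rndpool_lock is
 * acquired, never the other way around:
 *
 *	mutex_enter(&rmp->rm_mag.rm_lock);
 *	...
 *	mutex_enter(&rndpool_lock);	(pool lock nests inside rm_lock)
 *	...
 *	mutex_exit(&rndpool_lock);
 *	mutex_exit(&rmp->rm_mag.rm_lock);
 */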
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/sunddi.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/cmn_err.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/random.h>
#include <sys/sha1.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/taskq.h>
#include <rng/fips_random.h>
#define	RNDPOOLSIZE		1024	/* Pool size in bytes */
#define	MINEXTRACTBYTES		20
#define	MAXEXTRACTBYTES		1024
#define	PRNG_MAXOBLOCKS		1310720	/* Max output blocks per PRNG key */
#define	TIMEOUT_INTERVAL	5	/* Periodic mixing interval in secs */
typedef enum extract_type {
	NONBLOCK_EXTRACT,
	BLOCKING_EXTRACT,
	ALWAYS_EXTRACT
} extract_type_t;
/*
 * Hash-algo generic definitions. For now, they are SHA1's. We use SHA1
 * routines directly instead of using k-API because we can't return any
 * error code in the /dev/urandom case and we can get an error using k-API
 * if a mechanism is disabled.
 */
#define	HASHSIZE		20
#define	HASH_CTX		SHA1_CTX
#define	HashInit(ctx)		SHA1Init((ctx))
#define	HashUpdate(ctx, p, s)	SHA1Update((ctx), (p), (s))
#define	HashFinal(d, ctx)	SHA1Final((d), (ctx))

/* HMAC-SHA1 */
#define	HMAC_KEYSIZE		20
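
/*
 * A minimal usage sketch (illustrative only) of the generic macros above;
 * swapping in another hash with a 20-byte digest would only require
 * redefining them:
 *
 *	HASH_CTX ctx;
 *	uint8_t digest[HASHSIZE];
 *
 *	HashInit(&ctx);
 *	HashUpdate(&ctx, data, datalen);
 *	HashFinal(digest, &ctx);
 */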
/*
 * Cache of random bytes implemented as a circular buffer. findex and rindex
 * track the front and back of the circular buffer.
 */
uint8_t rndpool[RNDPOOLSIZE];
static int findex, rindex;
static int rnbyte_cnt;		/* Number of bytes in the cache */
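
/*
 * Because RNDPOOLSIZE is a power of two, the indices wrap around with a
 * mask instead of a modulo operation, e.g.
 *
 *	findex = (findex + 1) & (RNDPOOLSIZE - 1);
 *
 * which is exactly how rndc_addbytes() and rndc_getbytes() below walk
 * the pool.
 */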
static kmutex_t rndpool_lock;	/* protects r/w accesses to the cache, */
				/* and the global variables */
static kcondvar_t rndpool_read_cv;	/* serializes poll/read syscalls */
static int num_waiters;		/* #threads waiting to read from /dev/random */

static struct pollhead rnd_pollhead;
/* LINTED E_STATIC_UNUSED */
static timeout_id_t kcf_rndtimeout_id;
static crypto_mech_type_t rngmech_type = CRYPTO_MECH_INVALID;
rnd_stats_t rnd_stats;
static boolean_t rng_prov_found = B_TRUE;
static boolean_t rng_ok_to_log = B_TRUE;
static boolean_t rngprov_task_idle = B_TRUE;
static void rndc_addbytes(uint8_t *, size_t);
static void rndc_getbytes(uint8_t *ptr, size_t len);
static void rnd_handler(void *);
static void rnd_alloc_magazines(void);
static void rnd_fips_discard_initial(void);
static void rnd_init2(void *);
static void rnd_schedule_timeout(void);
/*
 * Called from kcf:_init()
 */
void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using
	 * . 2 unpredictable times: high resolution time since the boot-time,
	 *   and the current time-of-the day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnd_alloc_magazines();

	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
}
/*
 * This is called via the system taskq, so that we can do further
 * initializations that have to wait until the kcf module itself is
 * done loading. (After kcf:_init returns.)
 */
static void
rnd_init2(void *unused)
{
	_NOTE(ARGUNUSED(unused));

	/*
	 * This will load a randomness provider; typically "swrand",
	 * but could be another provider if so configured.
	 */
	rngmech_type = crypto_mech2id(SUN_RANDOM);

	/* Update rng_prov_found etc. */
	(void) kcf_rngprov_check();

	/* FIPS 140-2 init. */
	rnd_fips_discard_initial();

	/* Start rnd_handler calls. */
	rnd_schedule_timeout();
}
/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}
/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * In this case, we cycle through all the providers
 * submitting a request to each provider to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
static void
notify_done(void *arg, int rv)
{
	uchar_t *rndbuf = arg;

	if (rv == CRYPTO_SUCCESS)
		rndc_addbytes(rndbuf, MINEXTRACTBYTES);

	bzero(rndbuf, MINEXTRACTBYTES);
	kmem_free(rndbuf, MINEXTRACTBYTES);
}
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv;
	int total_bytes = 0;
	int prov_cnt = 0;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;

	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
static void
rngprov_task(void *arg)
{
	int len = (int)(uintptr_t)arg;
	uchar_t tbuf[MAXEXTRACTBYTES];

	ASSERT(len <= MAXEXTRACTBYTES);
	(void) rngprov_getbytes(tbuf, len, B_TRUE);
	rngprov_task_idle = B_TRUE;
}
/*
 * Returns "len" random or pseudo-random bytes in *ptr.
 * Will block if not enough random bytes are available and the
 * call is blocking.
 *
 * Called with rndpool_lock held (allowing caller to do optimistic locking;
 * releases the lock before return).
 */
static int
rnd_get_bytes(uint8_t *ptr, size_t len, extract_type_t how)
{
	size_t bytes;
	int got;

	ASSERT(mutex_owned(&rndpool_lock));
	/*
	 * Check if the request can be satisfied from the cache
	 * of random bytes.
	 */
	if (len <= rnbyte_cnt) {
		rndc_getbytes(ptr, len);
		mutex_exit(&rndpool_lock);
		return (0);
	}
	mutex_exit(&rndpool_lock);

	switch (how) {
	case BLOCKING_EXTRACT:
		if ((got = rngprov_getbytes(ptr, len, B_FALSE)) == -1)
			break;	/* No provider found */

		if (got == len)
			return (0);
		len -= got;
		ptr += got;
		break;

	case NONBLOCK_EXTRACT:
	case ALWAYS_EXTRACT:
		if ((got = rngprov_getbytes_nblk(ptr, len)) == -1) {
			/* No provider found */
			if (how == NONBLOCK_EXTRACT) {
				return (EAGAIN);
			}
		} else {
			if (got == len)
				return (0);
			len -= got;
			ptr += got;
		}
		if (how == NONBLOCK_EXTRACT && (rnbyte_cnt < len))
			return (EAGAIN);
		break;
	}

	mutex_enter(&rndpool_lock);
	while (len > 0) {
		if (how == BLOCKING_EXTRACT) {
			/* Check if there is enough */
			while (rnbyte_cnt < MINEXTRACTBYTES) {
				num_waiters++;
				if (cv_wait_sig(&rndpool_read_cv,
				    &rndpool_lock) == 0) {
					num_waiters--;
					mutex_exit(&rndpool_lock);
					return (EINTR);
				}
				num_waiters--;
			}
		}

		/* Figure out how many bytes to extract */
		bytes = min(len, rnbyte_cnt);
		rndc_getbytes(ptr, bytes);

		len -= bytes;
		ptr += bytes;

		if (len > 0 && how == ALWAYS_EXTRACT) {
			/*
			 * There are not enough bytes, but we can not block.
			 * This only happens in the case of /dev/urandom which
			 * runs an additional generation algorithm. So, there
			 * is no problem.
			 */
			while (len > 0) {
				*ptr = rndpool[findex];
				ptr++; len--;
				rindex = findex = (findex + 1) &
				    (RNDPOOLSIZE - 1);
			}
			break;
		}
	}

	mutex_exit(&rndpool_lock);
	return (0);
}
int
kcf_rnd_get_bytes(uint8_t *ptr, size_t len, boolean_t noblock)
{
	extract_type_t how;
	int error;

	how = noblock ? NONBLOCK_EXTRACT : BLOCKING_EXTRACT;
	mutex_enter(&rndpool_lock);
	if ((error = rnd_get_bytes(ptr, len, how)) != 0)
		return (error);

	BUMP_RND_STATS(rs_rndOut, len);
	return (0);
}
/*
 * Revisit this if the structs grow or we come up with a better way
 * of cache-line-padding structures.
 */
#define	RND_CPU_CACHE_SIZE	64
#define	RND_CPU_PAD_SIZE	RND_CPU_CACHE_SIZE*6
#define	RND_CPU_PAD		(RND_CPU_PAD_SIZE - \
	sizeof (rndmag_t))
/*
 * Per-CPU random state. Somewhat like kmem's magazines, this provides
 * a per-CPU instance of the pseudo-random generator. We have it much easier
 * than kmem, as we can afford to "leak" random bits if a CPU is DR'ed out.
 *
 * Note that this usage is preemption-safe; a thread
 * entering a critical section remembers which generator it locked
 * and unlocks the same one; should it be preempted and wind up running on
 * a different CPU, there will be a brief period of increased contention
 * before it exits the critical section but nothing will melt.
 */
typedef struct rndmag_s
{
	kmutex_t	rm_lock;
	uint8_t		*rm_buffer;	/* Start of buffer */
	uint8_t		*rm_eptr;	/* End of buffer */
	uint8_t		*rm_rptr;	/* Current read pointer */
	uint32_t	rm_oblocks;	/* time to rekey? */
	uint32_t	rm_ofuzz;	/* Rekey backoff state */
	uint32_t	rm_olimit;	/* Hard rekey limit */
	rnd_stats_t	rm_stats;	/* Per-CPU Statistics */
	uint32_t	rm_key[HASHSIZE/BYTES_IN_WORD];	/* FIPS XKEY */
	uint32_t	rm_seed[HASHSIZE/BYTES_IN_WORD]; /* seed for rekey */
	uint32_t	rm_previous[HASHSIZE/BYTES_IN_WORD]; /* prev random */
} rndmag_t;

typedef struct rndmag_pad_s
{
	rndmag_t	rm_mag;
	uint8_t		rm_pad[RND_CPU_PAD];
} rndmag_pad_t;
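
/*
 * A compile-time sanity sketch, assuming the CTASSERT macro from
 * <sys/debug.h> is available in this context: the pad is sized so that
 * each per-CPU magazine occupies exactly RND_CPU_PAD_SIZE bytes, a whole
 * number of presumed 64-byte cache lines.
 *
 *	CTASSERT(sizeof (rndmag_pad_t) == RND_CPU_PAD_SIZE);
 */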
/*
 * Generate random bytes for /dev/urandom by applying the
 * FIPS 186-2 algorithm with a key created from bytes extracted
 * from the pool. A maximum of PRNG_MAXOBLOCKS output blocks
 * is generated before a new key is obtained.
 *
 * Note that callers to this routine are likely to assume it can't fail.
 *
 * Called with rmp locked; releases lock.
 */
static int
rnd_generate_pseudo_bytes(rndmag_pad_t *rmp, uint8_t *ptr, size_t len)
{
	size_t bytes = len, size;
	int nblock, i;
	uint32_t oblocks;
	uint32_t tempout[HASHSIZE/BYTES_IN_WORD];
	uint32_t seed[HASHSIZE/BYTES_IN_WORD];
	hrtime_t timestamp;
	uint8_t *src, *dst;

	ASSERT(mutex_owned(&rmp->rm_mag.rm_lock));

	/* Nothing is being asked */
	if (len == 0) {
		mutex_exit(&rmp->rm_mag.rm_lock);
		return (0);
	}

	nblock = howmany(len, HASHSIZE);

	rmp->rm_mag.rm_oblocks += nblock;
	oblocks = rmp->rm_mag.rm_oblocks;

	do {
		if (oblocks >= rmp->rm_mag.rm_olimit) {
			/*
			 * Contention-avoiding rekey: see if
			 * the pool is locked, and if so, wait a bit.
			 * Do an 'exponential back-in' to ensure we don't
			 * run too long without rekey.
			 */
			if (rmp->rm_mag.rm_ofuzz) {
				/*
				 * Decaying exponential back-in for rekey.
				 */
				if ((rnbyte_cnt < MINEXTRACTBYTES) ||
				    (!mutex_tryenter(&rndpool_lock))) {
					rmp->rm_mag.rm_olimit +=
					    rmp->rm_mag.rm_ofuzz;
					rmp->rm_mag.rm_ofuzz >>= 1;
					goto punt;
				}
			} else {
				mutex_enter(&rndpool_lock);
			}

			/* Get a new chunk of entropy */
			(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
			    HMAC_KEYSIZE, ALWAYS_EXTRACT);

			rmp->rm_mag.rm_olimit = PRNG_MAXOBLOCKS/2;
			rmp->rm_mag.rm_ofuzz = PRNG_MAXOBLOCKS/4;
			oblocks = 0;
			rmp->rm_mag.rm_oblocks = nblock;
		}
punt:
		timestamp = gethrtime();

		src = (uint8_t *)&timestamp;
		dst = (uint8_t *)rmp->rm_mag.rm_seed;

		for (i = 0; i < HASHSIZE; i++) {
			dst[i] ^= src[i % sizeof (timestamp)];
		}

		bcopy(rmp->rm_mag.rm_seed, seed, HASHSIZE);

		fips_random_inner(rmp->rm_mag.rm_key, tempout, seed);

		if (bytes >= HASHSIZE) {
			size = HASHSIZE;
		} else {
			size = min(bytes, HASHSIZE);
		}

		/*
		 * FIPS 140-2: Continuous RNG test - each generation
		 * of an n-bit block shall be compared with the previously
		 * generated block. Test shall fail if any two compared
		 * n-bit blocks are equal.
		 */
		for (i = 0; i < HASHSIZE/BYTES_IN_WORD; i++) {
			if (tempout[i] != rmp->rm_mag.rm_previous[i])
				break;
		}
		if (i == HASHSIZE/BYTES_IN_WORD) {
			cmn_err(CE_WARN, "kcf_random: The value of 160-bit "
			    "block random bytes are same as the previous "
			    "one.\n");
			/* discard random bytes and return error */
			mutex_exit(&rmp->rm_mag.rm_lock);
			return (EIO);
		}

		bcopy(tempout, rmp->rm_mag.rm_previous, HASHSIZE);

		bcopy(tempout, ptr, size);
		ptr += size;
		bytes -= size;
		oblocks++;
		nblock--;
	} while (bytes > 0);

	/* Zero out sensitive information */
	bzero(seed, HASHSIZE);
	bzero(tempout, HASHSIZE);
	mutex_exit(&rmp->rm_mag.rm_lock);
	return (0);
}
/*
 * Per-CPU Random magazines.
 */
static rndmag_pad_t *rndmag;
static uint8_t *rndbuf;
static size_t rndmag_total;
/*
 * common/os/cpu.c says that platform support code can shrinkwrap
 * max_ncpus. On the off chance that we get loaded very early, we
 * read it exactly once, to copy it here.
 */
static uint32_t random_max_ncpus = 0;

/*
 * Boot-time tunables, for experimentation.
 */
size_t	rndmag_threshold = 2560;
size_t	rndbuf_len = 5120;
size_t	rndmag_size = 1280;
int
kcf_rnd_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	rndmag_pad_t *rmp;
	uint8_t *cptr, *eptr;

	/*
	 * Anyone who asks for zero bytes of randomness should get slapped.
	 */
	ASSERT(len > 0);

	/*
	 * Fast path.
	 */
	for (;;) {
		rmp = &rndmag[CPU->cpu_seqid];
		mutex_enter(&rmp->rm_mag.rm_lock);

		/*
		 * Big requests bypass buffer and tail-call the
		 * generate routine directly.
		 */
		if (len > rndmag_threshold) {
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			return (rnd_generate_pseudo_bytes(rmp, ptr, len));
		}

		cptr = rmp->rm_mag.rm_rptr;
		eptr = cptr + len;

		if (eptr <= rmp->rm_mag.rm_eptr) {
			rmp->rm_mag.rm_rptr = eptr;
			bcopy(cptr, ptr, len);
			BUMP_CPU_RND_STATS(rmp, rs_urndOut, len);
			mutex_exit(&rmp->rm_mag.rm_lock);

			return (0);
		}
		/*
		 * End fast path.
		 */
		rmp->rm_mag.rm_rptr = rmp->rm_mag.rm_buffer;
		/*
		 * Note: We assume the generate routine always succeeds
		 * in this case (because it does at present..)
		 * It also always releases rm_lock.
		 */
		(void) rnd_generate_pseudo_bytes(rmp, rmp->rm_mag.rm_buffer,
		    rndbuf_len);
	}
}
/*
 * We set up (empty) magazines for all of max_ncpus, possibly wasting a
 * little memory on big systems that don't have the full set installed.
 * See above; "empty" means "rptr equal to eptr"; this will trigger the
 * refill path in rnd_get_pseudo_bytes above on the first call for each CPU.
 *
 * TODO: make rndmag_size tunable at run time!
 */
static void
rnd_alloc_magazines()
{
	rndmag_pad_t *rmp;
	uint8_t *buf;
	int i;

	rndbuf_len = roundup(rndbuf_len, HASHSIZE);
	if (rndmag_size < rndbuf_len)
		rndmag_size = rndbuf_len;
	rndmag_size = roundup(rndmag_size, RND_CPU_CACHE_SIZE);

	random_max_ncpus = max_ncpus;
	rndmag_total = rndmag_size * random_max_ncpus;

	rndbuf = kmem_alloc(rndmag_total, KM_SLEEP);
	rndmag = kmem_zalloc(sizeof (rndmag_pad_t) * random_max_ncpus,
	    KM_SLEEP);

	for (i = 0; i < random_max_ncpus; i++) {
		rmp = &rndmag[i];
		mutex_init(&rmp->rm_mag.rm_lock, NULL, MUTEX_DRIVER, NULL);

		buf = rndbuf + i * rndmag_size;

		rmp->rm_mag.rm_buffer = buf;
		rmp->rm_mag.rm_eptr = buf + rndbuf_len;
		rmp->rm_mag.rm_rptr = buf + rndbuf_len;
		rmp->rm_mag.rm_oblocks = 1;
	}
}
/*
 * FIPS 140-2: the first n-bit (n > 15) block generated
 * after power-up, initialization, or reset shall not
 * be used, but shall be saved for comparison.
 */
static void
rnd_fips_discard_initial(void)
{
	uint8_t discard_buf[HASHSIZE];
	rndmag_pad_t *rmp;
	int i;

	for (i = 0; i < random_max_ncpus; i++) {
		rmp = &rndmag[i];

		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes(discard_buf,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		bcopy(discard_buf, rmp->rm_mag.rm_previous,
		    HMAC_KEYSIZE);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_key,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
		/* rnd_get_bytes() will call mutex_exit(&rndpool_lock) */
		mutex_enter(&rndpool_lock);
		(void) rnd_get_bytes((uint8_t *)rmp->rm_mag.rm_seed,
		    HMAC_KEYSIZE, ALWAYS_EXTRACT);
	}
}
static void
rnd_schedule_timeout(void)
{
	clock_t ut;	/* time in microseconds */

	/*
	 * The new timeout value is taken from the buffer of random bytes.
	 * We're merely reading a single byte from the buffer here, not
	 * consuming any random bytes.
	 * The timeout multiplier value is a random value between 0.5 sec and
	 * 1.544480 sec (0.5 sec + 0xFF000 microseconds).
	 * The new timeout is TIMEOUT_INTERVAL times that multiplier.
	 */
	ut = 500000 + (clock_t)((((uint32_t)rndpool[findex]) << 12) & 0xFF000);
	kcf_rndtimeout_id = timeout(rnd_handler, NULL,
	    TIMEOUT_INTERVAL * drv_usectohz(ut));
}
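
/*
 * A worked instance of the computation above: rndpool[findex] == 0x00
 * yields ut = 500000us (0.5 sec), while rndpool[findex] == 0xFF yields
 * ut = 500000 + 0xFF000 = 1544480us (~1.54 sec). With TIMEOUT_INTERVAL
 * at 5, rnd_handler() therefore fires every 2.5 to ~7.72 seconds.
 */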
/*
 * Called from the driver for a poll on /dev/random
 * . POLLOUT always succeeds.
 * . POLLIN and POLLRDNORM will block until a
 *   minimum amount of entropy is available.
 *
 * &rnd_pollhead is passed in *phpp in order to indicate the calling thread
 * will block. When enough random bytes are available, later, the timeout
 * handler routine will issue the pollwakeup() calls.
 */
void
kcf_rnd_chpoll(short events, int anyyet, short *reventsp,
    struct pollhead **phpp)
{
	*reventsp = events & POLLOUT;

	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Sampling of rnbyte_cnt is an atomic
		 * operation. Hence we do not need any locking.
		 */
		if (rnbyte_cnt >= MINEXTRACTBYTES)
			*reventsp |= (events & (POLLIN | POLLRDNORM));
	}

	if ((*reventsp == 0 && !anyyet) || (events & POLLET))
		*phpp = &rnd_pollhead;
}
static void
rnd_handler(void *arg)
{
	int len = 0;

	if (!rng_prov_found && rng_ok_to_log) {
		cmn_err(CE_WARN, "No randomness provider enabled for "
		    "/dev/random. Use cryptoadm(1M) to enable a provider.");
		rng_ok_to_log = B_FALSE;
	}

	if (num_waiters > 0)
		/*
		 * Note: len has no relationship with how many bytes
		 * a poll thread needs.
		 */
		len = MAXEXTRACTBYTES;
	else if (rnbyte_cnt < RNDPOOLSIZE)
		len = MINEXTRACTBYTES;

	/*
	 * Only one thread gets to set rngprov_task_idle at a given point
	 * of time and the order of the writes is defined. Also, it is OK
	 * if we read an older value of it and skip the dispatch once
	 * since we will get the correct value during the next time here.
	 * So, no locking is needed here.
	 */
	if (len > 0 && rngprov_task_idle) {
		rngprov_task_idle = B_FALSE;

		/*
		 * It is OK if taskq_dispatch fails here. We will retry
		 * the next time around. Meanwhile, a thread doing a
		 * read() will go to the provider directly, if the
		 * cache becomes empty.
		 */
		if (taskq_dispatch(system_taskq, rngprov_task,
		    (void *)(uintptr_t)len, TQ_NOSLEEP | TQ_NOQUEUE) == 0) {
			rngprov_task_idle = B_TRUE;
		}
	}

	mutex_enter(&rndpool_lock);
	/*
	 * Wake up threads waiting in poll() or for enough accumulated
	 * random bytes to read from /dev/random. In case a poll() is
	 * concurrent with a read(), the polling process may be woken up
	 * indicating that enough randomness is now available for reading,
	 * and another process *steals* the bits from the pool, causing the
	 * subsequent read() from the first process to block. It is acceptable
	 * since the blocking will eventually end, after the timeout
	 * has expired enough times to honor the read.
	 *
	 * Note - Since we hold the rndpool_lock across the pollwakeup() call
	 * we MUST NOT grab the rndpool_lock in kcf_rnd_chpoll().
	 */
	if (rnbyte_cnt >= MINEXTRACTBYTES)
		pollwakeup(&rnd_pollhead, POLLIN | POLLRDNORM);

	if (num_waiters > 0)
		cv_broadcast(&rndpool_read_cv);
	mutex_exit(&rndpool_lock);

	rnd_schedule_timeout();
}
static void
rndc_addbytes(uint8_t *ptr, size_t len)
{
	ASSERT(ptr != NULL && len > 0);
	ASSERT(rnbyte_cnt <= RNDPOOLSIZE);

	mutex_enter(&rndpool_lock);
	while ((len > 0) && (rnbyte_cnt < RNDPOOLSIZE)) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt++;
	}

	/* Handle buffer full case */
	while (len > 0) {
		rndpool[rindex] ^= *ptr;
		ptr++; len--;
		findex = rindex = (rindex + 1) & (RNDPOOLSIZE - 1);
	}
	mutex_exit(&rndpool_lock);
}
/*
 * Caller should check len <= rnbyte_cnt under the
 * rndpool_lock before calling.
 */
static void
rndc_getbytes(uint8_t *ptr, size_t len)
{
	ASSERT(MUTEX_HELD(&rndpool_lock));
	ASSERT(len <= rnbyte_cnt && rnbyte_cnt <= RNDPOOLSIZE);

	BUMP_RND_STATS(rs_rndcOut, len);

	while (len > 0) {
		*ptr = rndpool[findex];
		ptr++; len--;
		findex = (findex + 1) & (RNDPOOLSIZE - 1);
		rnbyte_cnt--;
	}
}
/* Random number exported entry points */

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider.
 */
int
random_add_pseudo_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (ptr == NULL || len == 0)
		return (EINVAL);

	rngprov_seed(ptr, len, entropy_est, 0);
	return (0);
}

/*
 * Mix the supplied bytes into the entropy pool of a kCF
 * RNG provider. Mix immediately.
 */
int
random_add_entropy(uint8_t *ptr, size_t len, uint_t entropy_est)
{
	if (ptr == NULL || len == 0)
		return (EINVAL);

	rngprov_seed(ptr, len, entropy_est, CRYPTO_SEED_NOW);
	return (0);
}

/*
 * Get bytes from the /dev/urandom generator. This function
 * always succeeds. Returns 0.
 */
int
random_get_pseudo_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_pseudo_bytes(ptr, len));
}

/*
 * Get bytes from the /dev/random generator. Returns 0
 * on success. Returns EAGAIN if there is insufficient entropy.
 */
int
random_get_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_TRUE));
}

int
random_get_blocking_bytes(uint8_t *ptr, size_t len)
{
	ASSERT(!mutex_owned(&rndpool_lock));

	if (len < 1)
		return (0);
	return (kcf_rnd_get_bytes(ptr, len, B_FALSE));
}
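
/*
 * An illustrative consumer (a sketch, not part of this module): a kernel
 * caller needing key material would typically try the high-quality
 * generator first and fall back to the pseudo-random one on EAGAIN:
 *
 *	uint8_t key[32];
 *
 *	if (random_get_bytes(key, sizeof (key)) != 0)
 *		(void) random_get_pseudo_bytes(key, sizeof (key));
 */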