/* $NetBSD: subr_cprng.c,v 1.27 2015/04/13 22:43:41 riastradh Exp $ */

/*-
 * Copyright (c) 2011-2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Thor Lancelot Simon and Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_cprng.c,v 1.27 2015/04/13 22:43:41 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cprng.h>
#include <sys/errno.h>
#include <sys/event.h>		/* XXX struct knote */
#include <sys/fcntl.h>		/* XXX FNONBLOCK */
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/poll.h>		/* XXX POLLIN/POLLOUT/&c. */
#include <sys/select.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/rndsink.h>
#if DIAGNOSTIC
#include <sys/rngtest.h>
#endif

#include <crypto/nist_ctr_drbg/nist_ctr_drbg.h>

#if defined(__HAVE_CPU_COUNTER)
#include <machine/cpu_counter.h>
#endif

static int sysctl_kern_urnd(SYSCTLFN_PROTO);
static int sysctl_kern_arnd(SYSCTLFN_PROTO);

static void	cprng_strong_generate(struct cprng_strong *, void *, size_t);
static void	cprng_strong_reseed(struct cprng_strong *);
static void	cprng_strong_reseed_from(struct cprng_strong *, const void *,
		    size_t, bool);
#if DIAGNOSTIC
static void	cprng_strong_rngtest(struct cprng_strong *);
#endif

static rndsink_callback_t	cprng_strong_rndsink_callback;

void
cprng_init(void)
{
	static struct sysctllog *random_sysctllog;

	nist_ctr_initialize();

	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "urandom",
		       SYSCTL_DESCR("Random integer value"),
		       sysctl_kern_urnd, 0, NULL, 0,
		       CTL_KERN, KERN_URND, CTL_EOL);
	sysctl_createv(&random_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "arandom",
		       SYSCTL_DESCR("n bytes of random data"),
		       sysctl_kern_arnd, 0, NULL, 0,
		       CTL_KERN, KERN_ARND, CTL_EOL);
}

static inline uint32_t
cprng_counter(void)
{
	struct timeval tv;

#if defined(__HAVE_CPU_COUNTER)
	if (cpu_hascounter())
		return cpu_counter32();
#endif
	if (__predict_false(cold)) {
		static int ctr;
		/* microtime unsafe if clock not running yet */
		return ctr++;
	}
	getmicrotime(&tv);
	return (tv.tv_sec * 1000000 + tv.tv_usec);
}

struct cprng_strong {
	char		cs_name[16];
	int		cs_flags;
	kmutex_t	cs_lock;
	percpu_t	*cs_percpu;
	kcondvar_t	cs_cv;
	struct selinfo	cs_selq;
	struct rndsink	*cs_rndsink;
	bool		cs_ready;
	NIST_CTR_DRBG	cs_drbg;

	/* XXX Kludge for /dev/random `information-theoretic' properties. */
	unsigned int	cs_remaining;
};

struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields. */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy. */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen. */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	explicit_memset(seed, 0, sizeof(seed));

	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
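
#ifdef CPRNG_EXAMPLE	/* hypothetical guard; never defined, sketch only */
/*
 * Illustrative sketch (not built): how a consumer might create a
 * generator, draw key material from it, and tear it down.  The guard,
 * function name, and string are invented for the example; the flags
 * and calls are the ones declared in <sys/cprng.h> and used elsewhere
 * in this file.
 */
static void
cprng_example_attach(void)
{
	struct cprng_strong *rng;
	uint8_t key[32];

	/* CPRNG_REKEY_ANY: reseed with whatever entropy is available. */
	rng = cprng_strong_create("example", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	(void)cprng_strong(rng, key, sizeof(key), 0);
	/* ...use key... */
	explicit_memset(key, 0, sizeof(key));
	cprng_strong_destroy(rng);
}
#endif	/* CPRNG_EXAMPLE */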

void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}

/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes. */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
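	/*
	 * Illustrative worked example (hypothetical numbers): if
	 * cs_remaining were 8 and a caller asked for 16 bytes, the
	 * request below would be truncated to 8 bytes, cs_remaining
	 * would reset to NIST_BLOCK_KEYLEN_BYTES, and the generator
	 * would be marked not ready until the next reseed.
	 */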
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
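
#ifdef CPRNG_EXAMPLE	/* hypothetical guard; never defined, sketch only */
/*
 * Illustrative sketch (not built) of the caller-side loop noted above:
 * cprng_strong never returns more than CPRNG_MAX_LEN bytes at once, so
 * a caller wanting a larger buffer loops, and a non-blocking caller
 * must also cope with a zero-length result.  The function name is
 * invented for the example.
 */
static size_t
cprng_example_fill(struct cprng_strong *rng, void *buf, size_t len)
{
	uint8_t *p = buf;
	size_t done = 0;

	while (done < len) {
		const size_t n = cprng_strong(rng, p + done, len - done,
		    FNONBLOCK);
		if (n == 0)	/* generator not ready; would have blocked */
			break;
		done += n;
	}
	return done;
}
#endif	/* CPRNG_EXAMPLE */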

static void	filt_cprng_detach(struct knote *);
static int	filt_cprng_event(struct knote *, long);

static const struct filterops cprng_filtops =
	{ 1, NULL, filt_cprng_detach, filt_cprng_event };

int
cprng_strong_kqfilter(struct cprng_strong *cprng, struct knote *kn)
{

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &cprng_filtops;
		kn->kn_hook = cprng;
		mutex_enter(&cprng->cs_lock);
		SLIST_INSERT_HEAD(&cprng->cs_selq.sel_klist, kn, kn_selnext);
		mutex_exit(&cprng->cs_lock);
		return 0;

	case EVFILT_WRITE:
	default:
		return EINVAL;
	}
}

static void
filt_cprng_detach(struct knote *kn)
{
	struct cprng_strong *const cprng = kn->kn_hook;

	mutex_enter(&cprng->cs_lock);
	SLIST_REMOVE(&cprng->cs_selq.sel_klist, kn, knote, kn_selnext);
	mutex_exit(&cprng->cs_lock);
}

static int
filt_cprng_event(struct knote *kn, long hint)
{
	struct cprng_strong *const cprng = kn->kn_hook;
	int ret;

	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		kn->kn_data = CPRNG_MAX_LEN; /* XXX Too large? */
		ret = 1;
	} else {
		ret = 0;
	}
	if (hint == NOTE_SUBMIT)
		KASSERT(mutex_owned(&cprng->cs_lock));
	else
		mutex_exit(&cprng->cs_lock);

	return ret;
}

int
cprng_strong_poll(struct cprng_strong *cprng, int events)
{
	int revents;

	if (!ISSET(events, (POLLIN | POLLRDNORM)))
		return 0;

	mutex_enter(&cprng->cs_lock);
	if (cprng->cs_ready) {
		revents = (events & (POLLIN | POLLRDNORM));
	} else {
		selrecord(curlwp, &cprng->cs_selq);
		revents = 0;
	}
	mutex_exit(&cprng->cs_lock);

	return revents;
}
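
/*
 * Illustrative note (hypothetical example): a character device backed
 * by a cprng_strong would typically just forward its poll routine
 * here, e.g.
 *
 *	static int
 *	exampledev_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *		return cprng_strong_poll(exampledev_sc.sc_cprng, events);
 *	}
 *
 * where exampledev and exampledev_sc are invented names.
 */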

/*
 * XXX Move nist_ctr_drbg_reseed_advised_p and
 * nist_ctr_drbg_reseed_needed_p into the nist_ctr_drbg API and make
 * the NIST_CTR_DRBG structure opaque.
 */
static bool
nist_ctr_drbg_reseed_advised_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter > (NIST_CTR_DRBG_RESEED_INTERVAL / 2));
}

static bool
nist_ctr_drbg_reseed_needed_p(NIST_CTR_DRBG *drbg)
{

	return (drbg->reseed_counter >= NIST_CTR_DRBG_RESEED_INTERVAL);
}

/*
 * Generate some data from the underlying generator.
 */
static void
cprng_strong_generate(struct cprng_strong *cprng, void *buffer, size_t bytes)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes <= CPRNG_MAX_LEN);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Generate some data from the NIST CTR_DRBG.  Caller
	 * guarantees reseed if we're not ready, and if we exhaust the
	 * generator, we mark ourselves not ready.  Consequently, this
	 * call to the CTR_DRBG should not fail.
	 */
	if (__predict_false(nist_ctr_drbg_generate(&cprng->cs_drbg, buffer,
		    bytes, &cc, sizeof(cc))))
		panic("cprng %s: NIST CTR_DRBG failed", cprng->cs_name);

	/*
	 * If we've been seeing a lot of use, ask for some fresh
	 * entropy soon.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_advised_p(&cprng->cs_drbg)))
		rndsink_schedule(cprng->cs_rndsink);

	/*
	 * If we just exhausted the generator, inform the next user
	 * that we need a reseed.
	 */
	if (__predict_false(nist_ctr_drbg_reseed_needed_p(&cprng->cs_drbg))) {
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink); /* paranoia */
	}
}

/*
 * Reseed with whatever we can get from the system entropy pool right now.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	explicit_memset(seed, 0, sizeof(seed));
}

/*
 * Reseed with the given seed.  If we now have full entropy, notify waiters.
 */
static void
cprng_strong_reseed_from(struct cprng_strong *cprng,
    const void *seed, size_t bytes, bool full_entropy)
{
	const uint32_t cc = cprng_counter();

	KASSERT(bytes == NIST_BLOCK_KEYLEN_BYTES);
	KASSERT(mutex_owned(&cprng->cs_lock));

	/*
	 * Notify anyone interested in the partiality of entropy in our
	 * seed -- anyone waiting for full entropy, or any system
	 * operators interested in knowing when the entropy pool is
	 * running on fumes.
	 */
	if (full_entropy) {
		if (!cprng->cs_ready) {
			cprng->cs_ready = true;
			cv_broadcast(&cprng->cs_cv);
			selnotify(&cprng->cs_selq, (POLLIN | POLLRDNORM),
			    NOTE_SUBMIT);
		}
	} else {
		/*
		 * XXX Is there any harm in reseeding with partial
		 * entropy when we had full entropy before?  If so,
		 * remove the conditional on this message.
		 */
		if (!cprng->cs_ready &&
		    !ISSET(cprng->cs_flags, CPRNG_REKEY_ANY))
			printf("cprng %s: reseeding with partial entropy\n",
			    cprng->cs_name);
	}

	if (nist_ctr_drbg_reseed(&cprng->cs_drbg, seed, bytes, &cc, sizeof(cc)))
		/* XXX Fix nist_ctr_drbg API so this can't happen. */
		panic("cprng %s: NIST CTR_DRBG reseed failed", cprng->cs_name);

#if DIAGNOSTIC
	cprng_strong_rngtest(cprng);
#endif
}

#if DIAGNOSTIC
/*
 * Generate some output and apply a statistical RNG test to it.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead? */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn? */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good... */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
#endif

/*
 * Feed entropy from an rndsink request into the CPRNG for which the
 * request was issued.
 */
static void
cprng_strong_rndsink_callback(void *context, const void *seed, size_t bytes)
{
	struct cprng_strong *const cprng = context;

	mutex_enter(&cprng->cs_lock);
	/* Assume that rndsinks provide only full-entropy output. */
	cprng_strong_reseed_from(cprng, seed, bytes, true);
	mutex_exit(&cprng->cs_lock);
}

static cprng_strong_t *sysctl_prng;

static int
makeprng(void)
{

	/* can't create in cprng_init(), too early */
	sysctl_prng = cprng_strong_create("sysctl", IPL_NONE,
	    CPRNG_INIT_ANY|CPRNG_REKEY_ANY);
	return 0;
}

/*
 * sysctl helper routine for kern.urandom node.  Picks a random number
 * for you.
 */
static int
sysctl_kern_urnd(SYSCTLFN_ARGS)
{
	static ONCE_DECL(control);
	int v, rv;

	RUN_ONCE(&control, makeprng);
	rv = cprng_strong(sysctl_prng, &v, sizeof(v), 0);
	if (rv == sizeof(v)) {
		struct sysctlnode node = *rnode;
		node.sysctl_data = &v;
		return (sysctl_lookup(SYSCTLFN_CALL(&node)));
	}
	else
		return (EIO);	/*XXX*/
}
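
/*
 * Illustrative note (hypothetical userland usage): this node is
 * reached from userland as kern.urandom, e.g.
 *
 *	int v;
 *	size_t len = sizeof(v);
 *	if (sysctlbyname("kern.urandom", &v, &len, NULL, 0) == 0)
 *		... v holds a freshly generated int ...
 */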

/*
 * sysctl helper routine for kern.arandom node.  Fills the supplied
 * structure with random data for you.
 *
 * This node was originally declared as type "int" but its implementation
 * in OpenBSD, whence it came, would happily return up to 8K of data if
 * requested.  Evidently this was used to key RC4 in userspace.
 *
 * In NetBSD, the libc stack-smash-protection code reads 64 bytes
 * from here at every program startup.  So though it would be nice
 * to make this node return only 32 or 64 bits, we can't.  Too bad!
 */
static int
sysctl_kern_arnd(SYSCTLFN_ARGS)
{
	int error;
	void *v;
	struct sysctlnode node = *rnode;

	switch (*oldlenp) {
	case 0:
		return 0;
	default:
		if (*oldlenp > 256) {
			return E2BIG;
		}
		v = kmem_alloc(*oldlenp, KM_SLEEP);
		cprng_fast(v, *oldlenp);
		node.sysctl_data = v;
		node.sysctl_size = *oldlenp;
		error = sysctl_lookup(SYSCTLFN_CALL(&node));
		kmem_free(v, *oldlenp);
		return error;
	}
}
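
/*
 * Illustrative note (hypothetical userland usage): a consumer such as
 * the libc stack-smash-protection startup code reads this node via the
 * binary MIB, roughly
 *
 *	int mib[2] = { CTL_KERN, KERN_ARND };
 *	uint8_t buf[64];
 *	size_t len = sizeof(buf);
 *	(void)sysctl(mib, 2, buf, &len, NULL, 0);
 *
 * requesting no more than the 256 bytes this handler allows.
 */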