/*	$NetBSD: kern_rndsink.c,v 1.17 2016/05/21 15:33:40 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_rndsink.c,v 1.17 2016/05/21 15:33:40 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rndsink.h>

#include <dev/rnd_private.h>

enum rsink_state {
	RNDSINK_IDLE,		/* no callback in progress */
	RNDSINK_QUEUED,		/* queued for callback */
	RNDSINK_IN_FLIGHT,	/* callback called */
	RNDSINK_REQUEUED,	/* queued again before callback done */
	RNDSINK_DEAD,		/* destroyed */
};
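
/*
 * State transitions, all made under rndsinks.lock:
 *
 *	IDLE -> QUEUED		rndsinks_enqueue
 *	QUEUED -> IN_FLIGHT	rndsinks_distribute, before the callback
 *	IN_FLIGHT -> IDLE	rndsinks_distribute, after the callback
 *	IN_FLIGHT -> REQUEUED	rndsinks_enqueue, while the callback runs
 *	REQUEUED -> QUEUED	rndsinks_distribute, after the callback
 *	QUEUED -> IDLE		rndsink_destroy
 *	IDLE -> DEAD		rndsink_destroy
 */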

struct rndsink {
	/* Callback state. */
	enum rsink_state	rsink_state;

	/* Entry on the queue of rndsinks, iff in the RNDSINK_QUEUED state. */
	TAILQ_ENTRY(rndsink)	rsink_entry;

	/* rndsink_create parameters. */
	unsigned int		rsink_bytes;
	rndsink_callback_t	*rsink_callback;
	void			*rsink_arg;
};

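/*
 * Global rndsink state: the queue of rndsinks awaiting entropy,
 * protected by a spin mutex, with a condvar signalled whenever a
 * callback finishes so that rndsink_destroy can wait for it.
 */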
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	TAILQ_HEAD(, rndsink) q;
} rndsinks __cacheline_aligned;

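/*
 * rndsinks_init: Initialize the rndsink queue.  Must run before any
 * other routine in this file.
 */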
void
rndsinks_init(void)
{

	/*
	 * This mutex must be at an ipl as high as the highest ipl of
	 * anyone who wants to call rndsink_request.
	 *
	 * XXX Call this IPL_RND, perhaps.
	 */
	mutex_init(&rndsinks.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&rndsinks.cv, "rndsink");
	TAILQ_INIT(&rndsinks.q);
}

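/*
 * rndsinks_distribute: For each queued rndsink, in order, try to
 * extract enough entropy to satisfy it, and if that succeeds, feed
 * the bytes to its callback.  Stop at the first sink that cannot yet
 * be satisfied.
 */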
void
rndsinks_distribute(void)
{
	uint8_t buffer[RNDSINK_MAX_BYTES];
	struct rndsink *rndsink;

	explicit_memset(buffer, 0, sizeof(buffer));	/* paranoia */

	mutex_spin_enter(&rndsinks.lock);
	while ((rndsink = TAILQ_FIRST(&rndsinks.q)) != NULL) {
		KASSERT(rndsink->rsink_state == RNDSINK_QUEUED);

		/* Bail if we can't get some entropy for this rndsink. */
		if (!rnd_tryextract(buffer, rndsink->rsink_bytes))
			break;

		/*
		 * Got some entropy.  Take the sink off the queue and
		 * feed the entropy to the callback, with rndsinks.lock
		 * dropped.  While running the callback, lock out
		 * rndsink_destroy by marking the sink in flight.
		 */
		TAILQ_REMOVE(&rndsinks.q, rndsink, rsink_entry);
		rndsink->rsink_state = RNDSINK_IN_FLIGHT;
		mutex_spin_exit(&rndsinks.lock);

		(*rndsink->rsink_callback)(rndsink->rsink_arg, buffer,
		    rndsink->rsink_bytes);
		explicit_memset(buffer, 0, rndsink->rsink_bytes);

		mutex_spin_enter(&rndsinks.lock);

		/*
		 * If, while the callback was running, anyone requested
		 * it be queued up again, do so now.  Otherwise, idle.
		 * Either way, it is now safe to destroy, so wake the
		 * pending rndsink_destroy, if there is one.
		 */
		if (rndsink->rsink_state == RNDSINK_REQUEUED) {
			TAILQ_INSERT_TAIL(&rndsinks.q, rndsink, rsink_entry);
			rndsink->rsink_state = RNDSINK_QUEUED;
		} else {
			KASSERT(rndsink->rsink_state == RNDSINK_IN_FLIGHT);
			rndsink->rsink_state = RNDSINK_IDLE;
		}
		cv_broadcast(&rndsinks.cv);
	}
	mutex_spin_exit(&rndsinks.lock);

	explicit_memset(buffer, 0, sizeof(buffer));	/* paranoia */
}

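/*
 * rndsinks_enqueue: Ensure rndsink is on the queue of sinks awaiting
 * entropy, and kick the on-demand entropy sources.  Caller must hold
 * rndsinks.lock.
 */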
static void
rndsinks_enqueue(struct rndsink *rndsink)
{

	KASSERT(mutex_owned(&rndsinks.lock));

	/* Kick on-demand entropy sources. */
	rnd_getmore(rndsink->rsink_bytes);

	/* Ensure this rndsink is on the queue. */
	switch (rndsink->rsink_state) {
	case RNDSINK_IDLE:
		/* Not on the queue and nobody is handling it. */
		TAILQ_INSERT_TAIL(&rndsinks.q, rndsink, rsink_entry);
		rndsink->rsink_state = RNDSINK_QUEUED;
		break;

	case RNDSINK_QUEUED:
		/* Already on the queue. */
		break;

	case RNDSINK_IN_FLIGHT:
		/* Someone is handling it.  Ask to queue it up again. */
		rndsink->rsink_state = RNDSINK_REQUEUED;
		break;

	case RNDSINK_REQUEUED:
		/* Already asked to queue it up again. */
		break;

	case RNDSINK_DEAD:
		panic("requesting entropy from dead rndsink: %p", rndsink);

	default:
		panic("rndsink %p in unknown state: %d", rndsink,
		    (int)rndsink->rsink_state);
	}
}

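/*
 * rndsink_create: Allocate a rndsink that requests bytes bytes of
 * entropy at a time, to be handed to callback(arg, ...).  bytes must
 * not exceed RNDSINK_MAX_BYTES.
 */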
struct rndsink *
rndsink_create(size_t bytes, rndsink_callback_t *callback, void *arg)
{
	struct rndsink *const rndsink = kmem_alloc(sizeof(*rndsink), KM_SLEEP);

	KASSERT(bytes <= RNDSINK_MAX_BYTES);

	rndsink->rsink_state = RNDSINK_IDLE;
	rndsink->rsink_bytes = bytes;
	rndsink->rsink_callback = callback;
	rndsink->rsink_arg = arg;

	return rndsink;
}

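/*
 * rndsink_destroy: Remove rndsink from the queue if necessary, wait
 * for any callback in flight to complete, and free it.  The callback
 * will not be invoked after this returns.
 */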
void
rndsink_destroy(struct rndsink *rndsink)
{

	/*
	 * Make sure the rndsink is off the queue, and if it's already
	 * in flight, wait for the callback to complete.
	 */
	mutex_spin_enter(&rndsinks.lock);
	while (rndsink->rsink_state != RNDSINK_IDLE) {
		switch (rndsink->rsink_state) {
		case RNDSINK_QUEUED:
			TAILQ_REMOVE(&rndsinks.q, rndsink, rsink_entry);
			rndsink->rsink_state = RNDSINK_IDLE;
			break;

		case RNDSINK_IN_FLIGHT:
		case RNDSINK_REQUEUED:
			cv_wait(&rndsinks.cv, &rndsinks.lock);
			break;

		case RNDSINK_DEAD:
			panic("destroying dead rndsink: %p", rndsink);

		default:
			panic("rndsink %p in unknown state: %d", rndsink,
			    (int)rndsink->rsink_state);
		}
	}
	rndsink->rsink_state = RNDSINK_DEAD;
	mutex_spin_exit(&rndsinks.lock);

	kmem_free(rndsink, sizeof(*rndsink));
}

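/*
 * rndsink_schedule: Ask that rndsink's callback be invoked with fresh
 * entropy when enough becomes available, without extracting any now.
 */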
void
rndsink_schedule(struct rndsink *rndsink)
{

	/*
	 * Optimistically check without the lock whether we're already
	 * queued.  If the unlocked read is stale, no harm is done:
	 * rndsinks_enqueue rechecks the state under the lock, and a
	 * stale `queued' answer means the callback is already on its
	 * way with entropy.
	 */
	if ((rndsink->rsink_state != RNDSINK_QUEUED) &&
	    (rndsink->rsink_state != RNDSINK_REQUEUED)) {
		mutex_spin_enter(&rndsinks.lock);
		rndsinks_enqueue(rndsink);
		mutex_spin_exit(&rndsinks.lock);
	}
}

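/*
 * rndsink_request: Extract bytes bytes for rndsink into buffer.
 * Return true if they were extracted with full entropy; otherwise
 * return false and queue the sink, so that its callback will be
 * invoked with fresh bytes once enough entropy arrives.
 */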
bool
rndsink_request(struct rndsink *rndsink, void *buffer, size_t bytes)
{

	KASSERT(bytes == rndsink->rsink_bytes);

	mutex_spin_enter(&rndsinks.lock);
	const bool full_entropy = rnd_extract(buffer, bytes);
	if (!full_entropy)
		rndsinks_enqueue(rndsink);
	mutex_spin_exit(&rndsinks.lock);

	return full_entropy;
}
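
/*
 * Example usage: a sketch only, not part of this file.  The callback
 * prototype is declared in <sys/rndsink.h>; example_softc,
 * example_rekey, and sc_key are hypothetical names.
 *
 *	static void
 *	example_rekey(void *arg, const void *seed, size_t bytes)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		KASSERT(bytes == sizeof(sc->sc_key));
 *		memcpy(sc->sc_key, seed, bytes);
 *	}
 *
 *	...
 *	sc->sc_rndsink = rndsink_create(sizeof(sc->sc_key),
 *	    example_rekey, sc);
 *	if (!rndsink_request(sc->sc_rndsink, sc->sc_key,
 *	    sizeof(sc->sc_key)))
 *		aprint_debug("partial entropy; will rekey when full\n");
 *	...
 *	rndsink_destroy(sc->sc_rndsink);
 */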