1 | /* $NetBSD: ubsec.c,v 1.43 2016/07/07 06:55:41 msaitoh Exp $ */ |
2 | /* $FreeBSD: src/sys/dev/ubsec/ubsec.c,v 1.6.2.6 2003/01/23 21:06:43 sam Exp $ */ |
/* $OpenBSD: ubsec.c,v 1.143 2009/03/27 13:31:30 reyk Exp $ */
4 | |
5 | /* |
6 | * Copyright (c) 2000 Jason L. Wright (jason@thought.net) |
7 | * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org) |
8 | * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com) |
9 | * |
10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions |
12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
20 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
21 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
22 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, |
23 | * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
24 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
25 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
26 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
27 | * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
28 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | * |
31 | * Effort sponsored in part by the Defense Advanced Research Projects |
32 | * Agency (DARPA) and Air Force Research Laboratory, Air Force |
33 | * Materiel Command, USAF, under agreement number F30602-01-2-0537. |
34 | * |
35 | */ |
36 | |
37 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: ubsec.c,v 1.43 2016/07/07 06:55:41 msaitoh Exp $");
39 | |
40 | #undef UBSEC_DEBUG |
41 | |
42 | /* |
43 | * uBsec 5[56]01, 58xx hardware crypto accelerator |
44 | */ |
45 | |
46 | #include <sys/param.h> |
47 | #include <sys/systm.h> |
48 | #include <sys/proc.h> |
49 | #include <sys/endian.h> |
50 | #ifdef __NetBSD__ |
51 | #define UBSEC_NO_RNG /* hangs on attach */ |
52 | #define letoh16 htole16 |
53 | #define letoh32 htole32 |
54 | #endif |
55 | #include <sys/errno.h> |
56 | #include <sys/malloc.h> |
57 | #include <sys/kernel.h> |
58 | #include <sys/mbuf.h> |
59 | #include <sys/device.h> |
60 | #include <sys/module.h> |
61 | #include <sys/queue.h> |
62 | #include <sys/sysctl.h> |
63 | |
64 | #include <opencrypto/cryptodev.h> |
65 | #include <opencrypto/xform.h> |
66 | #ifdef __OpenBSD__ |
67 | #include <dev/rndvar.h> |
68 | #include <sys/md5k.h> |
69 | #else |
70 | #include <sys/cprng.h> |
71 | #include <sys/md5.h> |
72 | #include <sys/rndpool.h> |
73 | #include <sys/rndsource.h> |
74 | #endif |
75 | #include <sys/sha1.h> |
76 | |
77 | #include <dev/pci/pcireg.h> |
78 | #include <dev/pci/pcivar.h> |
79 | #include <dev/pci/pcidevs.h> |
80 | |
81 | #include <dev/pci/ubsecreg.h> |
82 | #include <dev/pci/ubsecvar.h> |
83 | |
84 | /* |
85 | * Prototypes and count for the pci_device structure |
86 | */ |
87 | static int ubsec_probe(device_t, cfdata_t, void *); |
88 | static void ubsec_attach(device_t, device_t, void *); |
89 | static int ubsec_detach(device_t, int); |
90 | static int ubsec_sysctl_init(void); |
91 | static void ubsec_reset_board(struct ubsec_softc *); |
92 | static void ubsec_init_board(struct ubsec_softc *); |
93 | static void ubsec_init_pciregs(struct pci_attach_args *pa); |
94 | static void ubsec_cleanchip(struct ubsec_softc *); |
95 | static void ubsec_totalreset(struct ubsec_softc *); |
static int ubsec_free_q(struct ubsec_softc *, struct ubsec_q *);
97 | |
98 | #ifdef __OpenBSD__ |
99 | struct cfattach ubsec_ca = { |
100 | sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, |
101 | }; |
102 | |
103 | struct cfdriver ubsec_cd = { |
	0, "ubsec", DV_DULL
105 | }; |
106 | #else |
107 | CFATTACH_DECL_NEW(ubsec, sizeof(struct ubsec_softc), ubsec_probe, ubsec_attach, |
108 | ubsec_detach, NULL); |
109 | extern struct cfdriver ubsec_cd; |
110 | #endif |
111 | |
112 | /* patchable */ |
113 | #ifdef UBSEC_DEBUG |
114 | extern int ubsec_debug; |
int ubsec_debug = 1;
116 | #endif |
117 | |
118 | static int ubsec_intr(void *); |
static int ubsec_newsession(void *, u_int32_t *, struct cryptoini *);
static int ubsec_freesession(void *, u_int64_t);
static int ubsec_process(void *, struct cryptop *, int);
122 | static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *); |
123 | static void ubsec_feed(struct ubsec_softc *); |
124 | static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int); |
125 | static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *); |
126 | static void ubsec_feed2(struct ubsec_softc *); |
127 | static void ubsec_feed4(struct ubsec_softc *); |
128 | #ifndef UBSEC_NO_RNG |
129 | static void ubsec_rng(void *); |
130 | static void ubsec_rng_locked(void *); |
131 | static void ubsec_rng_get(size_t, void *); |
132 | #endif /* UBSEC_NO_RNG */ |
133 | static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t, |
134 | struct ubsec_dma_alloc *, int); |
135 | static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *); |
136 | static int ubsec_dmamap_aligned(bus_dmamap_t); |
137 | |
static int ubsec_kprocess(void *, struct cryptkop *, int);
139 | static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, |
140 | struct cryptkop *, int); |
141 | static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, |
142 | struct cryptkop *, int); |
143 | static int ubsec_kprocess_rsapriv(struct ubsec_softc *, |
144 | struct cryptkop *, int); |
145 | static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *); |
146 | static int ubsec_ksigbits(struct crparam *); |
147 | static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int); |
148 | static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int); |
149 | |
150 | #ifdef UBSEC_DEBUG |
151 | static void ubsec_dump_pb(volatile struct ubsec_pktbuf *); |
152 | static void ubsec_dump_mcr(struct ubsec_mcr *); |
153 | static void ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *); |
154 | #endif |
155 | |
156 | #define READ_REG(sc,r) \ |
157 | bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) |
158 | |
159 | #define WRITE_REG(sc,reg,val) \ |
160 | bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) |
161 | |
162 | #define SWAP32(x) (x) = htole32(ntohl((x))) |
163 | #ifndef HTOLE32 |
164 | #define HTOLE32(x) (x) = htole32(x) |
165 | #endif |
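/*
 * The chip is little-endian: SWAP32() takes a 32-bit word in network
 * (big-endian) order and stores it in the order the chip expects,
 * while HTOLE32() converts a host-order word in place.
 */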
166 | |
167 | struct ubsec_stats ubsecstats; |
168 | |
169 | static struct sysctllog *ubsec_sysctllog; |
170 | |
171 | /* |
172 | * ubsec_maxbatch controls the number of crypto ops to voluntarily |
173 | * collect into one submission to the hardware. This batching happens |
174 | * when ops are dispatched from the crypto subsystem with a hint that |
175 | * more are to follow immediately. These ops must also not be marked |
176 | * with a ``no delay'' flag. |
177 | */ |
178 | static int ubsec_maxbatch = 1; |
179 | |
180 | /* |
181 | * ubsec_maxaggr controls the number of crypto ops to submit to the |
182 | * hardware as a unit. This aggregation reduces the number of interrupts |
183 | * to the host at the expense of increased latency (for all but the last |
184 | * operation). For network traffic setting this to one yields the highest |
185 | * performance but at the expense of more interrupt processing. |
186 | */ |
187 | static int ubsec_maxaggr = 1; |
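/*
 * Both knobs are exported read/write via sysctl (see
 * ubsec_sysctl_init() below, which creates them under CTL_HW), e.g.:
 *
 *	sysctl -w hw.ubsec.maxbatch=4
 *	sysctl -w hw.ubsec.maxaggr=4
 */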
188 | |
189 | static const struct ubsec_product { |
190 | pci_vendor_id_t ubsec_vendor; |
191 | pci_product_id_t ubsec_product; |
192 | int ubsec_flags; |
193 | int ubsec_statmask; |
194 | int ubsec_maxaggr; |
195 | const char *ubsec_name; |
196 | } ubsec_products[] = { |
197 | { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5501, |
198 | 0, |
199 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
200 | UBS_MIN_AGGR, |
201 | "Bluesteel 5501" |
202 | }, |
203 | { PCI_VENDOR_BLUESTEEL, PCI_PRODUCT_BLUESTEEL_5601, |
204 | UBS_FLAGS_KEY | UBS_FLAGS_RNG, |
205 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
206 | UBS_MIN_AGGR, |
207 | "Bluesteel 5601" |
208 | }, |
209 | |
210 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5801, |
211 | 0, |
212 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
213 | UBS_MIN_AGGR, |
214 | "Broadcom BCM5801" |
215 | }, |
216 | |
217 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5802, |
218 | UBS_FLAGS_KEY | UBS_FLAGS_RNG, |
219 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
220 | UBS_MIN_AGGR, |
221 | "Broadcom BCM5802" |
222 | }, |
223 | |
224 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5805, |
225 | UBS_FLAGS_KEY | UBS_FLAGS_RNG, |
226 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
227 | UBS_MIN_AGGR, |
228 | "Broadcom BCM5805" |
229 | }, |
230 | |
231 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5820, |
232 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
233 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, |
234 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR, |
235 | UBS_MIN_AGGR, |
236 | "Broadcom BCM5820" |
237 | }, |
238 | |
239 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5821, |
240 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
241 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, |
242 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
243 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
244 | UBS_MIN_AGGR, |
245 | "Broadcom BCM5821" |
246 | }, |
247 | { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_SCA1K, |
248 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
249 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, |
250 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
251 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
252 | UBS_MIN_AGGR, |
253 | "Sun Crypto Accelerator 1000" |
254 | }, |
255 | { PCI_VENDOR_SUN, PCI_PRODUCT_SUN_5821, |
256 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
257 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, |
258 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
259 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
260 | UBS_MIN_AGGR, |
261 | "Broadcom BCM5821 (Sun)" |
262 | }, |
263 | |
264 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5822, |
265 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
266 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY, |
267 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
268 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
269 | UBS_MIN_AGGR, |
270 | "Broadcom BCM5822" |
271 | }, |
272 | |
273 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5823, |
274 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
275 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES, |
276 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
277 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
278 | UBS_MIN_AGGR, |
279 | "Broadcom BCM5823" |
280 | }, |
281 | |
282 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5825, |
283 | UBS_FLAGS_KEY | UBS_FLAGS_RNG | UBS_FLAGS_LONGCTX | |
284 | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES, |
285 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
286 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY, |
287 | UBS_MIN_AGGR, |
288 | "Broadcom BCM5825" |
289 | }, |
290 | |
291 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5860, |
292 | UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM | |
293 | UBS_FLAGS_LONGCTX | |
294 | UBS_FLAGS_RNG | UBS_FLAGS_RNG4 | |
295 | UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES, |
296 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
297 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY | |
298 | BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY, |
299 | UBS_MAX_AGGR, |
300 | "Broadcom BCM5860" |
301 | }, |
302 | |
303 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5861, |
304 | UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM | |
305 | UBS_FLAGS_LONGCTX | |
306 | UBS_FLAGS_RNG | UBS_FLAGS_RNG4 | |
307 | UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES, |
308 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
309 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY | |
310 | BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY, |
311 | UBS_MAX_AGGR, |
312 | "Broadcom BCM5861" |
313 | }, |
314 | |
315 | { PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_5862, |
316 | UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM | |
317 | UBS_FLAGS_LONGCTX | |
318 | UBS_FLAGS_RNG | UBS_FLAGS_RNG4 | |
319 | UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY | UBS_FLAGS_AES, |
320 | BS_STAT_MCR1_DONE | BS_STAT_DMAERR | |
321 | BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY | |
322 | BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY, |
323 | UBS_MAX_AGGR, |
324 | "Broadcom BCM5862" |
325 | }, |
326 | |
327 | { 0, 0, |
328 | 0, |
329 | 0, |
330 | 0, |
331 | NULL |
332 | } |
333 | }; |
334 | |
335 | static const struct ubsec_product * |
336 | ubsec_lookup(const struct pci_attach_args *pa) |
337 | { |
338 | const struct ubsec_product *up; |
339 | |
340 | for (up = ubsec_products; up->ubsec_name != NULL; up++) { |
341 | if (PCI_VENDOR(pa->pa_id) == up->ubsec_vendor && |
342 | PCI_PRODUCT(pa->pa_id) == up->ubsec_product) |
343 | return (up); |
344 | } |
345 | return (NULL); |
346 | } |
347 | |
348 | static int |
349 | ubsec_probe(device_t parent, cfdata_t match, void *aux) |
350 | { |
351 | struct pci_attach_args *pa = (struct pci_attach_args *)aux; |
352 | |
353 | if (ubsec_lookup(pa) != NULL) |
354 | return (1); |
355 | |
356 | return (0); |
357 | } |
358 | |
359 | static void |
360 | ubsec_attach(device_t parent, device_t self, void *aux) |
361 | { |
362 | struct ubsec_softc *sc = device_private(self); |
363 | struct pci_attach_args *pa = aux; |
364 | const struct ubsec_product *up; |
365 | pci_chipset_tag_t pc = pa->pa_pc; |
366 | pci_intr_handle_t ih; |
367 | const char *intrstr = NULL; |
368 | pcireg_t memtype; |
369 | struct ubsec_dma *dmap; |
370 | u_int32_t cmd, i; |
371 | char intrbuf[PCI_INTRSTR_LEN]; |
372 | |
373 | sc->sc_dev = self; |
374 | sc->sc_pct = pc; |
375 | |
376 | up = ubsec_lookup(pa); |
377 | if (up == NULL) { |
		printf("\n");
		panic("ubsec_attach: impossible");
380 | } |
381 | |
	pci_aprint_devinfo_fancy(pa, "Crypto processor", up->ubsec_name, 1);
383 | |
384 | SIMPLEQ_INIT(&sc->sc_queue); |
385 | SIMPLEQ_INIT(&sc->sc_qchip); |
386 | SIMPLEQ_INIT(&sc->sc_queue2); |
387 | SIMPLEQ_INIT(&sc->sc_qchip2); |
388 | SIMPLEQ_INIT(&sc->sc_queue4); |
389 | SIMPLEQ_INIT(&sc->sc_qchip4); |
390 | SIMPLEQ_INIT(&sc->sc_q2free); |
391 | |
392 | sc->sc_flags = up->ubsec_flags; |
393 | sc->sc_statmask = up->ubsec_statmask; |
394 | sc->sc_maxaggr = up->ubsec_maxaggr; |
395 | |
396 | cmd = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); |
397 | cmd |= PCI_COMMAND_MASTER_ENABLE; |
398 | pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, cmd); |
399 | |
400 | memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BS_BAR); |
401 | if (pci_mapreg_map(pa, BS_BAR, memtype, 0, |
402 | &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_memsize)) { |
		aprint_error_dev(self, "can't find mem space");
404 | return; |
405 | } |
406 | |
407 | sc->sc_dmat = pa->pa_dmat; |
408 | |
409 | if (pci_intr_map(pa, &ih)) { |
		aprint_error_dev(self, "couldn't map interrupt\n");
411 | return; |
412 | } |
413 | intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); |
414 | sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc); |
415 | if (sc->sc_ih == NULL) { |
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
420 | return; |
421 | } |
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
423 | |
424 | sc->sc_cid = crypto_get_driverid(0); |
425 | if (sc->sc_cid < 0) { |
		aprint_error_dev(self, "couldn't get crypto driver id\n");
427 | pci_intr_disestablish(pc, sc->sc_ih); |
428 | return; |
429 | } |
430 | |
431 | sc->sc_rng_need = RND_POOLBITS / NBBY; |
432 | mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_VM); |
433 | |
434 | SIMPLEQ_INIT(&sc->sc_freequeue); |
435 | dmap = sc->sc_dmaa; |
436 | for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) { |
437 | struct ubsec_q *q; |
438 | |
439 | q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q), |
440 | M_DEVBUF, M_ZERO|M_NOWAIT); |
441 | if (q == NULL) { |
442 | aprint_error_dev(self, |
			    "can't allocate queue buffers\n");
444 | break; |
445 | } |
446 | |
447 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk), |
448 | &dmap->d_alloc, 0)) { |
			aprint_error_dev(self, "can't allocate dma buffers\n");
450 | free(q, M_DEVBUF); |
451 | break; |
452 | } |
453 | dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr; |
454 | |
455 | q->q_dma = dmap; |
456 | sc->sc_queuea[i] = q; |
457 | |
458 | SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); |
459 | } |
460 | |
461 | crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0, |
462 | ubsec_newsession, ubsec_freesession, ubsec_process, sc); |
463 | crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0, |
464 | ubsec_newsession, ubsec_freesession, ubsec_process, sc); |
465 | crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC_96, 0, 0, |
466 | ubsec_newsession, ubsec_freesession, ubsec_process, sc); |
467 | crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC_96, 0, 0, |
468 | ubsec_newsession, ubsec_freesession, ubsec_process, sc); |
469 | if (sc->sc_flags & UBS_FLAGS_AES) { |
470 | crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0, |
471 | ubsec_newsession, ubsec_freesession, ubsec_process, sc); |
472 | } |
473 | |
474 | /* |
475 | * Reset Broadcom chip |
476 | */ |
477 | ubsec_reset_board(sc); |
478 | |
479 | /* |
480 | * Init Broadcom specific PCI settings |
481 | */ |
482 | ubsec_init_pciregs(pa); |
483 | |
484 | /* |
485 | * Init Broadcom chip |
486 | */ |
487 | ubsec_init_board(sc); |
488 | |
489 | #ifndef UBSEC_NO_RNG |
490 | if (sc->sc_flags & UBS_FLAGS_RNG) { |
491 | if (sc->sc_flags & UBS_FLAGS_RNG4) |
492 | sc->sc_statmask |= BS_STAT_MCR4_DONE; |
493 | else |
494 | sc->sc_statmask |= BS_STAT_MCR2_DONE; |
495 | |
496 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), |
497 | &sc->sc_rng.rng_q.q_mcr, 0)) |
498 | goto skip_rng; |
499 | |
500 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass), |
501 | &sc->sc_rng.rng_q.q_ctx, 0)) { |
502 | ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); |
503 | goto skip_rng; |
504 | } |
505 | |
506 | if (ubsec_dma_malloc(sc, sizeof(u_int32_t) * |
507 | UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) { |
508 | ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); |
509 | ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); |
510 | goto skip_rng; |
511 | } |
512 | |
513 | rndsource_setcb(&sc->sc_rnd_source, ubsec_rng_get, sc); |
514 | rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), |
515 | RND_TYPE_RNG, |
516 | RND_FLAG_COLLECT_VALUE|RND_FLAG_HASCB); |
517 | if (hz >= 100) |
518 | sc->sc_rnghz = hz / 100; |
519 | else |
520 | sc->sc_rnghz = 1; |
521 | #ifdef __OpenBSD__ |
522 | timeout_set(&sc->sc_rngto, ubsec_rng, sc); |
523 | timeout_add(&sc->sc_rngto, sc->sc_rnghz); |
524 | #else |
525 | callout_init(&sc->sc_rngto, 0); |
526 | callout_setfunc(&sc->sc_rngto, ubsec_rng, sc); |
527 | callout_schedule(&sc->sc_rngto, sc->sc_rnghz); |
528 | #endif |
529 | skip_rng: |
530 | if (sc->sc_rnghz) |
			aprint_normal_dev(self,
			    "random number generator enabled\n");
		else
			aprint_error_dev(self,
			    "WARNING: random number generator disabled\n");
536 | } |
537 | #endif /* UBSEC_NO_RNG */ |
538 | |
539 | if (sc->sc_flags & UBS_FLAGS_KEY) { |
540 | sc->sc_statmask |= BS_STAT_MCR2_DONE; |
541 | |
542 | crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0, |
543 | ubsec_kprocess, sc); |
544 | #if 0 |
545 | crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0, |
546 | ubsec_kprocess, sc); |
547 | #endif |
548 | } |
549 | } |
550 | |
551 | static int |
552 | ubsec_detach(device_t self, int flags) |
553 | { |
554 | struct ubsec_softc *sc = device_private(self); |
555 | struct ubsec_q *q, *qtmp; |
556 | volatile u_int32_t ctrl; |
557 | |
558 | /* disable interrupts */ |
559 | /* XXX wait/abort current ops? where is DMAERR enabled? */ |
560 | ctrl = READ_REG(sc, BS_CTRL); |
561 | |
562 | ctrl &= ~(BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR); |
563 | if (sc->sc_flags & UBS_FLAGS_MULTIMCR) |
564 | ctrl &= ~BS_CTRL_MCR4INT; |
565 | |
566 | WRITE_REG(sc, BS_CTRL, ctrl); |
567 | |
568 | #ifndef UBSEC_NO_RNG |
569 | if (sc->sc_flags & UBS_FLAGS_RNG) { |
570 | callout_halt(&sc->sc_rngto, NULL); |
571 | ubsec_dma_free(sc, &sc->sc_rng.rng_buf); |
572 | ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx); |
573 | ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr); |
574 | rnd_detach_source(&sc->sc_rnd_source); |
575 | } |
576 | #endif /* UBSEC_NO_RNG */ |
577 | |
578 | crypto_unregister_all(sc->sc_cid); |
579 | |
580 | mutex_spin_enter(&sc->sc_mtx); |
581 | |
582 | ubsec_totalreset(sc); /* XXX leaves the chip running */ |
583 | |
584 | SIMPLEQ_FOREACH_SAFE(q, &sc->sc_freequeue, q_next, qtmp) { |
585 | ubsec_dma_free(sc, &q->q_dma->d_alloc); |
586 | if (q->q_src_map != NULL) |
587 | bus_dmamap_destroy(sc->sc_dmat, q->q_src_map); |
588 | if (q->q_cached_dst_map != NULL) |
589 | bus_dmamap_destroy(sc->sc_dmat, q->q_cached_dst_map); |
590 | free(q, M_DEVBUF); |
591 | } |
592 | |
593 | mutex_spin_exit(&sc->sc_mtx); |
594 | |
595 | if (sc->sc_ih != NULL) { |
596 | pci_intr_disestablish(sc->sc_pct, sc->sc_ih); |
597 | sc->sc_ih = NULL; |
598 | } |
599 | |
600 | if (sc->sc_memsize != 0) { |
601 | bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_memsize); |
602 | sc->sc_memsize = 0; |
603 | } |
604 | |
605 | return 0; |
606 | } |
607 | |
MODULE(MODULE_CLASS_DRIVER, ubsec, "pci,opencrypto");
609 | |
610 | #ifdef _MODULE |
611 | #include "ioconf.c" |
612 | #endif |
613 | |
614 | static int |
615 | ubsec_modcmd(modcmd_t cmd, void *data) |
616 | { |
617 | int error = 0; |
618 | |
619 | switch (cmd) { |
620 | case MODULE_CMD_INIT: |
621 | #ifdef _MODULE |
622 | error = config_init_component(cfdriver_ioconf_ubsec, |
623 | cfattach_ioconf_ubsec, cfdata_ioconf_ubsec); |
624 | #endif |
625 | if (error == 0) |
626 | error = ubsec_sysctl_init(); |
627 | return error; |
628 | case MODULE_CMD_FINI: |
629 | if (ubsec_sysctllog != NULL) |
630 | sysctl_teardown(&ubsec_sysctllog); |
631 | #ifdef _MODULE |
632 | error = config_fini_component(cfdriver_ioconf_ubsec, |
633 | cfattach_ioconf_ubsec, cfdata_ioconf_ubsec); |
634 | #endif |
635 | return error; |
636 | default: |
637 | return ENOTTY; |
638 | } |
639 | } |
640 | |
641 | static int |
642 | ubsec_sysctl_init(void) |
643 | { |
644 | const struct sysctlnode *node = NULL; |
645 | |
646 | ubsec_sysctllog = NULL; |
647 | |
	sysctl_createv(&ubsec_sysctllog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "ubsec",
	    SYSCTL_DESCR("ubsec options"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL);
	sysctl_createv(&ubsec_sysctllog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxbatch",
	    SYSCTL_DESCR("max ops to batch w/o interrupt"),
	    NULL, 0, &ubsec_maxbatch, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(&ubsec_sysctllog, 0, &node, NULL,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxaggr",
	    SYSCTL_DESCR("max ops to aggregate under one interrupt"),
	    NULL, 0, &ubsec_maxaggr, 0,
	    CTL_CREATE, CTL_EOL);
666 | |
667 | return 0; |
668 | } |
669 | |
670 | /* |
671 | * UBSEC Interrupt routine |
672 | */ |
673 | static int |
674 | ubsec_intr(void *arg) |
675 | { |
676 | struct ubsec_softc *sc = arg; |
677 | volatile u_int32_t stat; |
678 | struct ubsec_q *q; |
679 | struct ubsec_dma *dmap; |
680 | int flags; |
681 | int npkts = 0, i; |
682 | |
683 | mutex_spin_enter(&sc->sc_mtx); |
684 | stat = READ_REG(sc, BS_STAT); |
685 | stat &= sc->sc_statmask; |
686 | if (stat == 0) { |
687 | mutex_spin_exit(&sc->sc_mtx); |
688 | return (0); |
689 | } |
690 | |
691 | WRITE_REG(sc, BS_STAT, stat); /* IACK */ |
692 | |
693 | /* |
694 | * Check to see if we have any packets waiting for us |
695 | */ |
696 | if ((stat & BS_STAT_MCR1_DONE)) { |
697 | while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { |
698 | q = SIMPLEQ_FIRST(&sc->sc_qchip); |
699 | dmap = q->q_dma; |
700 | |
701 | if ((dmap->d_dma->d_mcr.mcr_flags |
702 | & htole16(UBS_MCR_DONE)) == 0) |
703 | break; |
704 | |
705 | q = SIMPLEQ_FIRST(&sc->sc_qchip); |
706 | SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); |
707 | |
708 | npkts = q->q_nstacked_mcrs; |
			sc->sc_nqchip -= 1 + npkts;
			/*
			 * Search for further sc_qchip ubsec_q's that share
			 * the same MCR and complete them too; they must be
			 * at the head of the queue.
			 */
			for (i = 0; i < npkts; i++) {
				if (q->q_stacked_mcr[i])
717 | ubsec_callback(sc, q->q_stacked_mcr[i]); |
718 | else |
719 | break; |
720 | } |
721 | ubsec_callback(sc, q); |
722 | } |
723 | |
		/*
		 * Don't send any more packets to the chip if there has
		 * been a DMAERR.
		 */
728 | if (!(stat & BS_STAT_DMAERR)) |
729 | ubsec_feed(sc); |
730 | } |
731 | |
732 | /* |
733 | * Check to see if we have any key setups/rng's waiting for us |
734 | */ |
735 | if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) && |
736 | (stat & BS_STAT_MCR2_DONE)) { |
737 | struct ubsec_q2 *q2; |
738 | struct ubsec_mcr *mcr; |
739 | |
740 | while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) { |
741 | q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); |
742 | |
743 | bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map, |
744 | 0, q2->q_mcr.dma_map->dm_mapsize, |
745 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
746 | |
747 | mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; |
748 | |
			/* A bug in newer devices requires this field to be swapped */
750 | if (sc->sc_flags & UBS_FLAGS_MULTIMCR) |
751 | flags = htole16(mcr->mcr_flags); |
752 | else |
753 | flags = mcr->mcr_flags; |
754 | if ((flags & htole16(UBS_MCR_DONE)) == 0) { |
755 | bus_dmamap_sync(sc->sc_dmat, |
756 | q2->q_mcr.dma_map, 0, |
757 | q2->q_mcr.dma_map->dm_mapsize, |
758 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
759 | break; |
760 | } |
761 | q2 = SIMPLEQ_FIRST(&sc->sc_qchip2); |
762 | SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, /*q2,*/ q_next); |
763 | ubsec_callback2(sc, q2); |
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
768 | if (!(stat & BS_STAT_DMAERR)) |
769 | ubsec_feed2(sc); |
770 | } |
771 | } |
772 | if ((sc->sc_flags & UBS_FLAGS_RNG4) && (stat & BS_STAT_MCR4_DONE)) { |
773 | struct ubsec_q2 *q2; |
774 | struct ubsec_mcr *mcr; |
775 | |
776 | while (!SIMPLEQ_EMPTY(&sc->sc_qchip4)) { |
777 | q2 = SIMPLEQ_FIRST(&sc->sc_qchip4); |
778 | |
779 | bus_dmamap_sync(sc->sc_dmat, q2->q_mcr.dma_map, |
780 | 0, q2->q_mcr.dma_map->dm_mapsize, |
781 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
782 | |
783 | mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr; |
784 | |
			/* A bug in newer devices requires this field to be swapped */
786 | flags = htole16(mcr->mcr_flags); |
787 | |
788 | if ((flags & htole16(UBS_MCR_DONE)) == 0) { |
789 | bus_dmamap_sync(sc->sc_dmat, |
790 | q2->q_mcr.dma_map, 0, |
791 | q2->q_mcr.dma_map->dm_mapsize, |
792 | BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
793 | break; |
794 | } |
795 | SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip4, q_next); |
796 | ubsec_callback2(sc, q2); |
			/*
			 * Don't send any more packets to the chip if there
			 * has been a DMAERR.
			 */
801 | if (!(stat & BS_STAT_DMAERR)) |
802 | ubsec_feed4(sc); |
803 | } |
804 | } |
805 | |
806 | /* |
807 | * Check to see if we got any DMA Error |
808 | */ |
809 | if (stat & BS_STAT_DMAERR) { |
810 | #ifdef UBSEC_DEBUG |
811 | if (ubsec_debug) { |
812 | volatile u_int32_t a = READ_REG(sc, BS_ERR); |
813 | |
			printf("%s: dmaerr %s@%08x\n", device_xname(sc->sc_dev),
			    (a & BS_ERR_READ) ? "read" : "write",
816 | a & BS_ERR_ADDR); |
817 | } |
818 | #endif /* UBSEC_DEBUG */ |
819 | ubsecstats.hst_dmaerr++; |
820 | ubsec_totalreset(sc); |
821 | ubsec_feed(sc); |
822 | } |
823 | |
824 | if (sc->sc_needwakeup) { /* XXX check high watermark */ |
825 | int wkeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); |
826 | #ifdef UBSEC_DEBUG |
827 | if (ubsec_debug) |
			printf("%s: wakeup crypto (%x)\n",
829 | device_xname(sc->sc_dev), sc->sc_needwakeup); |
830 | #endif /* UBSEC_DEBUG */ |
831 | sc->sc_needwakeup &= ~wkeup; |
832 | crypto_unblock(sc->sc_cid, wkeup); |
833 | } |
834 | mutex_spin_exit(&sc->sc_mtx); |
835 | return (1); |
836 | } |
837 | |
838 | /* |
839 | * ubsec_feed() - aggregate and post requests to chip |
840 | * OpenBSD comments: |
841 | * It is assumed that the caller set splnet() |
842 | */ |
843 | static void |
844 | ubsec_feed(struct ubsec_softc *sc) |
845 | { |
846 | struct ubsec_q *q, *q2; |
847 | int npkts, i; |
848 | void *v; |
849 | u_int32_t stat; |
850 | #ifdef UBSEC_DEBUG |
851 | static int max; |
852 | #endif /* UBSEC_DEBUG */ |
853 | |
854 | npkts = sc->sc_nqueue; |
855 | if (npkts > ubsecstats.hst_maxqueue) |
856 | ubsecstats.hst_maxqueue = npkts; |
857 | if (npkts < 2) |
858 | goto feed1; |
859 | |
860 | /* |
861 | * Decide how many ops to combine in a single MCR. We cannot |
862 | * aggregate more than UBS_MAX_AGGR because this is the number |
863 | * of slots defined in the data structure. Otherwise we clamp |
864 | * based on the tunable parameter ubsec_maxaggr. Note that |
865 | * aggregation can happen in two ways: either by batching ops |
866 | * from above or because the h/w backs up and throttles us. |
867 | * Aggregating ops reduces the number of interrupts to the host |
868 | * but also (potentially) increases the latency for processing |
869 | * completed ops as we only get an interrupt when all aggregated |
870 | * ops have completed. |
871 | */ |
872 | if (npkts > sc->sc_maxaggr) |
873 | npkts = sc->sc_maxaggr; |
874 | if (npkts > ubsec_maxaggr) |
875 | npkts = ubsec_maxaggr; |
876 | if (npkts > ubsecstats.hst_maxbatch) |
877 | ubsecstats.hst_maxbatch = npkts; |
878 | if (npkts < 2) |
879 | goto feed1; |
880 | ubsecstats.hst_totbatch += npkts-1; |
881 | |
882 | if ((stat = READ_REG(sc, BS_STAT)) |
883 | & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { |
884 | if (stat & BS_STAT_DMAERR) { |
885 | ubsec_totalreset(sc); |
886 | ubsecstats.hst_dmaerr++; |
887 | } else { |
888 | ubsecstats.hst_mcr1full++; |
889 | } |
890 | return; |
891 | } |
892 | |
893 | #ifdef UBSEC_DEBUG |
894 | if (ubsec_debug) |
		printf("merging %d records\n", npkts);
896 | /* XXX temporary aggregation statistics reporting code */ |
897 | if (max < npkts) { |
898 | max = npkts; |
		printf("%s: new max aggregate %d\n", device_xname(sc->sc_dev),
900 | max); |
901 | } |
902 | #endif /* UBSEC_DEBUG */ |
903 | |
904 | q = SIMPLEQ_FIRST(&sc->sc_queue); |
905 | SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); |
906 | --sc->sc_nqueue; |
907 | |
908 | bus_dmamap_sync(sc->sc_dmat, q->q_src_map, |
909 | 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
910 | if (q->q_dst_map != NULL) |
911 | bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, |
912 | 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
913 | |
914 | q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */ |
915 | |
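	/*
	 * Stack the remaining npkts - 1 requests onto the first MCR:
	 * copy each one's per-packet header (the trailing ubsec_mcr_add
	 * portion of its own MCR) into the lead MCR's d_mcradd[] array,
	 * so the single write to BS_MCR1 below posts the whole batch.
	 */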
916 | for (i = 0; i < q->q_nstacked_mcrs; i++) { |
917 | q2 = SIMPLEQ_FIRST(&sc->sc_queue); |
918 | bus_dmamap_sync(sc->sc_dmat, q2->q_src_map, |
919 | 0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
920 | if (q2->q_dst_map != NULL) |
921 | bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map, |
922 | 0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
		q2 = SIMPLEQ_FIRST(&sc->sc_queue);
924 | SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q2,*/ q_next); |
925 | --sc->sc_nqueue; |
926 | |
927 | v = ((void *)&q2->q_dma->d_dma->d_mcr); |
928 | v = (char*)v + (sizeof(struct ubsec_mcr) - |
929 | sizeof(struct ubsec_mcr_add)); |
930 | memcpy(&q->q_dma->d_dma->d_mcradd[i], v, |
931 | sizeof(struct ubsec_mcr_add)); |
932 | q->q_stacked_mcr[i] = q2; |
933 | } |
934 | q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts); |
935 | SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); |
936 | sc->sc_nqchip += npkts; |
937 | if (sc->sc_nqchip > ubsecstats.hst_maxqchip) |
938 | ubsecstats.hst_maxqchip = sc->sc_nqchip; |
939 | bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, |
940 | 0, q->q_dma->d_alloc.dma_map->dm_mapsize, |
941 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
942 | WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + |
943 | offsetof(struct ubsec_dmachunk, d_mcr)); |
944 | return; |
945 | |
946 | feed1: |
947 | while (!SIMPLEQ_EMPTY(&sc->sc_queue)) { |
948 | if ((stat = READ_REG(sc, BS_STAT)) |
949 | & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) { |
950 | if (stat & BS_STAT_DMAERR) { |
951 | ubsec_totalreset(sc); |
952 | ubsecstats.hst_dmaerr++; |
953 | } else { |
954 | ubsecstats.hst_mcr1full++; |
955 | } |
956 | break; |
957 | } |
958 | |
959 | q = SIMPLEQ_FIRST(&sc->sc_queue); |
960 | |
961 | bus_dmamap_sync(sc->sc_dmat, q->q_src_map, |
962 | 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
963 | if (q->q_dst_map != NULL) |
964 | bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, |
965 | 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
966 | bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map, |
967 | 0, q->q_dma->d_alloc.dma_map->dm_mapsize, |
968 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
969 | |
970 | WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr + |
971 | offsetof(struct ubsec_dmachunk, d_mcr)); |
972 | #ifdef UBSEC_DEBUG |
973 | if (ubsec_debug) |
			printf("feed: q->chip %p %08x stat %08x\n",
975 | q, (u_int32_t)q->q_dma->d_alloc.dma_paddr, |
976 | stat); |
977 | #endif /* UBSEC_DEBUG */ |
978 | q = SIMPLEQ_FIRST(&sc->sc_queue); |
979 | SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, /*q,*/ q_next); |
980 | --sc->sc_nqueue; |
981 | SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next); |
982 | sc->sc_nqchip++; |
983 | } |
984 | if (sc->sc_nqchip > ubsecstats.hst_maxqchip) |
985 | ubsecstats.hst_maxqchip = sc->sc_nqchip; |
986 | } |
987 | |
988 | /* |
989 | * Allocate a new 'session' and return an encoded session id. 'sidp' |
990 | * contains our registration id, and should contain an encoded session |
991 | * id on successful allocation. |
992 | */ |
993 | static int |
994 | ubsec_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) |
995 | { |
996 | struct cryptoini *c, *encini = NULL, *macini = NULL; |
997 | struct ubsec_softc *sc; |
998 | struct ubsec_session *ses = NULL; |
999 | MD5_CTX md5ctx; |
1000 | SHA1_CTX sha1ctx; |
1001 | int i, sesn; |
1002 | |
1003 | sc = arg; |
1004 | KASSERT(sc != NULL /*, ("ubsec_newsession: null softc")*/); |
1005 | |
1006 | if (sidp == NULL || cri == NULL || sc == NULL) |
1007 | return (EINVAL); |
1008 | |
1009 | for (c = cri; c != NULL; c = c->cri_next) { |
1010 | if (c->cri_alg == CRYPTO_MD5_HMAC_96 || |
1011 | c->cri_alg == CRYPTO_SHA1_HMAC_96) { |
1012 | if (macini) |
1013 | return (EINVAL); |
1014 | macini = c; |
1015 | } else if (c->cri_alg == CRYPTO_DES_CBC || |
1016 | c->cri_alg == CRYPTO_3DES_CBC || |
1017 | c->cri_alg == CRYPTO_AES_CBC) { |
1018 | if (encini) |
1019 | return (EINVAL); |
1020 | encini = c; |
1021 | } else |
1022 | return (EINVAL); |
1023 | } |
1024 | if (encini == NULL && macini == NULL) |
1025 | return (EINVAL); |
1026 | |
1027 | if (encini && encini->cri_alg == CRYPTO_AES_CBC) { |
1028 | switch (encini->cri_klen) { |
1029 | case 128: |
1030 | case 192: |
1031 | case 256: |
1032 | break; |
1033 | default: |
1034 | return (EINVAL); |
1035 | } |
1036 | } |
1037 | |
1038 | if (sc->sc_sessions == NULL) { |
1039 | ses = sc->sc_sessions = (struct ubsec_session *)malloc( |
1040 | sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); |
1041 | if (ses == NULL) |
1042 | return (ENOMEM); |
1043 | sesn = 0; |
1044 | sc->sc_nsessions = 1; |
1045 | } else { |
1046 | for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { |
1047 | if (sc->sc_sessions[sesn].ses_used == 0) { |
1048 | ses = &sc->sc_sessions[sesn]; |
1049 | break; |
1050 | } |
1051 | } |
1052 | |
1053 | if (ses == NULL) { |
1054 | sesn = sc->sc_nsessions; |
1055 | ses = (struct ubsec_session *)malloc((sesn + 1) * |
1056 | sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT); |
1057 | if (ses == NULL) |
1058 | return (ENOMEM); |
1059 | memcpy(ses, sc->sc_sessions, sesn * |
1060 | sizeof(struct ubsec_session)); |
1061 | memset(sc->sc_sessions, 0, sesn * |
1062 | sizeof(struct ubsec_session)); |
1063 | free(sc->sc_sessions, M_DEVBUF); |
1064 | sc->sc_sessions = ses; |
1065 | ses = &sc->sc_sessions[sesn]; |
1066 | sc->sc_nsessions++; |
1067 | } |
1068 | } |
1069 | |
1070 | memset(ses, 0, sizeof(struct ubsec_session)); |
1071 | ses->ses_used = 1; |
1072 | if (encini) { |
1073 | /* get an IV, network byte order */ |
1074 | #ifdef __NetBSD__ |
1075 | cprng_fast(ses->ses_iv, sizeof(ses->ses_iv)); |
1076 | #else |
1077 | get_random_bytes(ses->ses_iv, sizeof(ses->ses_iv)); |
1078 | #endif |
1079 | |
1080 | /* Go ahead and compute key in ubsec's byte order */ |
1081 | if (encini->cri_alg == CRYPTO_AES_CBC) { |
1082 | memcpy(ses->ses_key, encini->cri_key, |
1083 | encini->cri_klen / 8); |
		} else if (encini->cri_alg == CRYPTO_DES_CBC) {
			/*
			 * Replicate the single DES key into all three
			 * slots: EDE with K1 == K2 == K3 reduces to a
			 * single DES pass.
			 */
1086 | memcpy(&ses->ses_key[0], encini->cri_key, 8); |
1087 | memcpy(&ses->ses_key[2], encini->cri_key, 8); |
1088 | memcpy(&ses->ses_key[4], encini->cri_key, 8); |
1089 | } else |
1090 | memcpy(ses->ses_key, encini->cri_key, 24); |
1091 | |
1092 | SWAP32(ses->ses_key[0]); |
1093 | SWAP32(ses->ses_key[1]); |
1094 | SWAP32(ses->ses_key[2]); |
1095 | SWAP32(ses->ses_key[3]); |
1096 | SWAP32(ses->ses_key[4]); |
1097 | SWAP32(ses->ses_key[5]); |
1098 | } |
1099 | |
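	/*
	 * Precompute the HMAC inner and outer digest states.  Since
	 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), hashing one
	 * block of K ^ ipad (and of K ^ opad) here lets the hardware
	 * resume from the saved intermediate states instead of
	 * re-hashing the key for every request.
	 */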
1100 | if (macini) { |
1101 | for (i = 0; i < macini->cri_klen / 8; i++) |
1102 | macini->cri_key[i] ^= HMAC_IPAD_VAL; |
1103 | |
1104 | if (macini->cri_alg == CRYPTO_MD5_HMAC_96) { |
1105 | MD5Init(&md5ctx); |
1106 | MD5Update(&md5ctx, macini->cri_key, |
1107 | macini->cri_klen / 8); |
1108 | MD5Update(&md5ctx, hmac_ipad_buffer, |
1109 | HMAC_BLOCK_LEN - (macini->cri_klen / 8)); |
1110 | memcpy(ses->ses_hminner, md5ctx.state, |
1111 | sizeof(md5ctx.state)); |
1112 | } else { |
1113 | SHA1Init(&sha1ctx); |
1114 | SHA1Update(&sha1ctx, macini->cri_key, |
1115 | macini->cri_klen / 8); |
1116 | SHA1Update(&sha1ctx, hmac_ipad_buffer, |
1117 | HMAC_BLOCK_LEN - (macini->cri_klen / 8)); |
1118 | memcpy(ses->ses_hminner, sha1ctx.state, |
1119 | sizeof(sha1ctx.state)); |
1120 | } |
1121 | |
1122 | for (i = 0; i < macini->cri_klen / 8; i++) |
1123 | macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); |
1124 | |
1125 | if (macini->cri_alg == CRYPTO_MD5_HMAC_96) { |
1126 | MD5Init(&md5ctx); |
1127 | MD5Update(&md5ctx, macini->cri_key, |
1128 | macini->cri_klen / 8); |
1129 | MD5Update(&md5ctx, hmac_opad_buffer, |
1130 | HMAC_BLOCK_LEN - (macini->cri_klen / 8)); |
1131 | memcpy(ses->ses_hmouter, md5ctx.state, |
1132 | sizeof(md5ctx.state)); |
1133 | } else { |
1134 | SHA1Init(&sha1ctx); |
1135 | SHA1Update(&sha1ctx, macini->cri_key, |
1136 | macini->cri_klen / 8); |
1137 | SHA1Update(&sha1ctx, hmac_opad_buffer, |
1138 | HMAC_BLOCK_LEN - (macini->cri_klen / 8)); |
1139 | memcpy(ses->ses_hmouter, sha1ctx.state, |
1140 | sizeof(sha1ctx.state)); |
1141 | } |
1142 | |
1143 | for (i = 0; i < macini->cri_klen / 8; i++) |
1144 | macini->cri_key[i] ^= HMAC_OPAD_VAL; |
1145 | } |
1146 | |
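	/*
	 * The returned sid packs the device unit together with the
	 * per-device session index; UBSEC_SESSION() recovers the index
	 * on each subsequent request.
	 */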
1147 | *sidp = UBSEC_SID(device_unit(sc->sc_dev), sesn); |
1148 | return (0); |
1149 | } |
1150 | |
1151 | /* |
1152 | * Deallocate a session. |
1153 | */ |
1154 | static int |
1155 | ubsec_freesession(void *arg, u_int64_t tid) |
1156 | { |
1157 | struct ubsec_softc *sc; |
1158 | int session; |
1159 | u_int32_t sid = ((u_int32_t) tid) & 0xffffffff; |
1160 | |
1161 | sc = arg; |
1162 | KASSERT(sc != NULL /*, ("ubsec_freesession: null softc")*/); |
1163 | |
1164 | session = UBSEC_SESSION(sid); |
1165 | if (session >= sc->sc_nsessions) |
1166 | return (EINVAL); |
1167 | |
1168 | memset(&sc->sc_sessions[session], 0, sizeof(sc->sc_sessions[session])); |
1169 | return (0); |
1170 | } |
1171 | |
1172 | #ifdef __FreeBSD__ /* Ugly gratuitous changes to bus_dma */ |
1173 | static void |
1174 | ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, |
1175 | int error) |
1176 | { |
1177 | struct ubsec_operand *op = arg; |
1178 | |
1179 | KASSERT(nsegs <= UBS_MAX_SCATTER |
1180 | /*, ("Too many DMA segments returned when mapping operand")*/); |
1181 | #ifdef UBSEC_DEBUG |
1182 | if (ubsec_debug) |
		printf("ubsec_op_cb: mapsize %u nsegs %d\n",
1184 | (u_int) mapsize, nsegs); |
1185 | #endif |
1186 | op->mapsize = mapsize; |
1187 | op->nsegs = nsegs; |
1188 | memcpy(op->segs, seg, nsegs * sizeof (seg[0])); |
1189 | } |
1190 | #endif |
1191 | |
1192 | static int |
1193 | ubsec_process(void *arg, struct cryptop *crp, int hint) |
1194 | { |
1195 | struct ubsec_q *q = NULL; |
1196 | #ifdef __OpenBSD__ |
1197 | int card; |
1198 | #endif |
1199 | int err = 0, i, j, nicealign; |
1200 | struct ubsec_softc *sc; |
1201 | struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; |
1202 | int encoffset = 0, macoffset = 0, cpskip, cpoffset; |
1203 | int sskip, dskip, stheend, dtheend; |
1204 | int16_t coffset; |
1205 | struct ubsec_session *ses, key; |
1206 | struct ubsec_dma *dmap = NULL; |
1207 | u_int16_t flags = 0; |
1208 | int ivlen = 0, keylen = 0; |
1209 | |
1210 | sc = arg; |
1211 | KASSERT(sc != NULL /*, ("ubsec_process: null softc")*/); |
1212 | |
1213 | if (crp == NULL || crp->crp_callback == NULL || sc == NULL) { |
1214 | ubsecstats.hst_invalid++; |
1215 | return (EINVAL); |
1216 | } |
1217 | if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) { |
1218 | ubsecstats.hst_badsession++; |
1219 | return (EINVAL); |
1220 | } |
1221 | |
1222 | mutex_spin_enter(&sc->sc_mtx); |
1223 | |
1224 | if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) { |
1225 | ubsecstats.hst_queuefull++; |
1226 | sc->sc_needwakeup |= CRYPTO_SYMQ; |
1227 | mutex_spin_exit(&sc->sc_mtx); |
		return (ERESTART);
1229 | } |
1230 | |
1231 | q = SIMPLEQ_FIRST(&sc->sc_freequeue); |
1232 | SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, /*q,*/ q_next); |
1233 | mutex_spin_exit(&sc->sc_mtx); |
1234 | |
1235 | dmap = q->q_dma; /* Save dma pointer */ |
1236 | /* don't lose the cached dmamaps q_src_map and q_cached_dst_map */ |
1237 | memset(q, 0, offsetof(struct ubsec_q, q_src_map)); |
1238 | memset(&key, 0, sizeof(key)); |
1239 | |
1240 | q->q_sesn = UBSEC_SESSION(crp->crp_sid); |
1241 | q->q_dma = dmap; |
1242 | ses = &sc->sc_sessions[q->q_sesn]; |
1243 | |
1244 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1245 | q->q_src_m = (struct mbuf *)crp->crp_buf; |
1246 | q->q_dst_m = (struct mbuf *)crp->crp_buf; |
1247 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
1248 | q->q_src_io = (struct uio *)crp->crp_buf; |
1249 | q->q_dst_io = (struct uio *)crp->crp_buf; |
1250 | } else { |
1251 | ubsecstats.hst_badflags++; |
1252 | err = EINVAL; |
1253 | goto errout; /* XXX we don't handle contiguous blocks! */ |
1254 | } |
1255 | |
1256 | memset(&dmap->d_dma->d_mcr, 0, sizeof(struct ubsec_mcr)); |
1257 | |
1258 | dmap->d_dma->d_mcr.mcr_pkts = htole16(1); |
1259 | dmap->d_dma->d_mcr.mcr_flags = 0; |
1260 | q->q_crp = crp; |
1261 | |
1262 | crd1 = crp->crp_desc; |
1263 | if (crd1 == NULL) { |
1264 | ubsecstats.hst_nodesc++; |
1265 | err = EINVAL; |
1266 | goto errout; |
1267 | } |
1268 | crd2 = crd1->crd_next; |
1269 | |
1270 | if (crd2 == NULL) { |
1271 | if (crd1->crd_alg == CRYPTO_MD5_HMAC_96 || |
1272 | crd1->crd_alg == CRYPTO_SHA1_HMAC_96) { |
1273 | maccrd = crd1; |
1274 | enccrd = NULL; |
1275 | } else if (crd1->crd_alg == CRYPTO_DES_CBC || |
1276 | crd1->crd_alg == CRYPTO_3DES_CBC || |
1277 | crd1->crd_alg == CRYPTO_AES_CBC) { |
1278 | maccrd = NULL; |
1279 | enccrd = crd1; |
1280 | } else { |
1281 | ubsecstats.hst_badalg++; |
1282 | err = EINVAL; |
1283 | goto errout; |
1284 | } |
1285 | } else { |
1286 | if ((crd1->crd_alg == CRYPTO_MD5_HMAC_96 || |
1287 | crd1->crd_alg == CRYPTO_SHA1_HMAC_96) && |
1288 | (crd2->crd_alg == CRYPTO_DES_CBC || |
1289 | crd2->crd_alg == CRYPTO_3DES_CBC || |
1290 | crd2->crd_alg == CRYPTO_AES_CBC) && |
1291 | ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) { |
1292 | maccrd = crd1; |
1293 | enccrd = crd2; |
1294 | } else if ((crd1->crd_alg == CRYPTO_DES_CBC || |
1295 | crd1->crd_alg == CRYPTO_3DES_CBC || |
1296 | crd1->crd_alg == CRYPTO_AES_CBC) && |
1297 | (crd2->crd_alg == CRYPTO_MD5_HMAC_96 || |
1298 | crd2->crd_alg == CRYPTO_SHA1_HMAC_96) && |
1299 | (crd1->crd_flags & CRD_F_ENCRYPT)) { |
1300 | enccrd = crd1; |
1301 | maccrd = crd2; |
1302 | } else { |
1303 | /* |
1304 | * We cannot order the ubsec as requested |
1305 | */ |
1306 | ubsecstats.hst_badalg++; |
1307 | err = EINVAL; |
1308 | goto errout; |
1309 | } |
1310 | } |
1311 | |
1312 | if (enccrd) { |
1313 | if (enccrd->crd_alg == CRYPTO_AES_CBC) { |
1314 | if ((sc->sc_flags & UBS_FLAGS_AES) == 0) { |
1315 | /* |
1316 | * We cannot order the ubsec as requested |
1317 | */ |
1318 | ubsecstats.hst_badalg++; |
1319 | err = EINVAL; |
1320 | goto errout; |
1321 | } |
1322 | flags |= htole16(UBS_PKTCTX_ENC_AES); |
1323 | switch (enccrd->crd_klen) { |
1324 | case 128: |
1325 | case 192: |
1326 | case 256: |
1327 | keylen = enccrd->crd_klen / 8; |
1328 | break; |
1329 | default: |
1330 | err = EINVAL; |
1331 | goto errout; |
1332 | } |
1333 | ivlen = 16; |
1334 | } else { |
1335 | flags |= htole16(UBS_PKTCTX_ENC_3DES); |
1336 | ivlen = 8; |
1337 | keylen = 24; |
1338 | } |
1339 | |
1340 | encoffset = enccrd->crd_skip; |
1341 | |
1342 | if (enccrd->crd_flags & CRD_F_ENCRYPT) { |
1343 | q->q_flags |= UBSEC_QFLAGS_COPYOUTIV; |
1344 | |
1345 | if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) |
1346 | memcpy(key.ses_iv, enccrd->crd_iv, ivlen); |
1347 | else { |
1348 | for (i = 0; i < (ivlen / 4); i++) |
1349 | key.ses_iv[i] = ses->ses_iv[i]; |
1350 | } |
1351 | |
1352 | if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) { |
1353 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
1354 | m_copyback(q->q_src_m, |
1355 | enccrd->crd_inject, |
1356 | ivlen, (void *)key.ses_iv); |
1357 | else if (crp->crp_flags & CRYPTO_F_IOV) |
1358 | cuio_copyback(q->q_src_io, |
1359 | enccrd->crd_inject, |
1360 | ivlen, (void *)key.ses_iv); |
1361 | } |
1362 | } else { |
1363 | flags |= htole16(UBS_PKTCTX_INBOUND); |
1364 | |
1365 | if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) |
1366 | memcpy(key.ses_iv, enccrd->crd_iv, ivlen); |
1367 | else if (crp->crp_flags & CRYPTO_F_IMBUF) |
1368 | m_copydata(q->q_src_m, enccrd->crd_inject, |
1369 | ivlen, (void *)key.ses_iv); |
1370 | else if (crp->crp_flags & CRYPTO_F_IOV) |
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, ivlen,
				    (void *)key.ses_iv);
1374 | } |
1375 | |
1376 | for (i = 0; i < (keylen / 4); i++) |
1377 | key.ses_key[i] = ses->ses_key[i]; |
1378 | for (i = 0; i < (ivlen / 4); i++) |
1379 | SWAP32(key.ses_iv[i]); |
1380 | } |
1381 | |
1382 | if (maccrd) { |
1383 | macoffset = maccrd->crd_skip; |
1384 | |
1385 | if (maccrd->crd_alg == CRYPTO_MD5_HMAC_96) |
1386 | flags |= htole16(UBS_PKTCTX_AUTH_MD5); |
1387 | else |
1388 | flags |= htole16(UBS_PKTCTX_AUTH_SHA1); |
1389 | |
1390 | for (i = 0; i < 5; i++) { |
1391 | key.ses_hminner[i] = ses->ses_hminner[i]; |
1392 | key.ses_hmouter[i] = ses->ses_hmouter[i]; |
1393 | |
1394 | HTOLE32(key.ses_hminner[i]); |
1395 | HTOLE32(key.ses_hmouter[i]); |
1396 | } |
1397 | } |
1398 | |
1399 | if (enccrd && maccrd) { |
1400 | /* |
1401 | * ubsec cannot handle packets where the end of encryption |
1402 | * and authentication are not the same, or where the |
1403 | * encrypted part begins before the authenticated part. |
1404 | */ |
1405 | if ((encoffset + enccrd->crd_len) != |
1406 | (macoffset + maccrd->crd_len)) { |
1407 | ubsecstats.hst_lenmismatch++; |
1408 | err = EINVAL; |
1409 | goto errout; |
1410 | } |
1411 | if (enccrd->crd_skip < maccrd->crd_skip) { |
1412 | ubsecstats.hst_skipmismatch++; |
1413 | err = EINVAL; |
1414 | goto errout; |
1415 | } |
1416 | sskip = maccrd->crd_skip; |
1417 | cpskip = dskip = enccrd->crd_skip; |
1418 | stheend = maccrd->crd_len; |
1419 | dtheend = enccrd->crd_len; |
1420 | coffset = enccrd->crd_skip - maccrd->crd_skip; |
1421 | cpoffset = cpskip + dtheend; |
1422 | #ifdef UBSEC_DEBUG |
1423 | if (ubsec_debug) { |
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("src: skip %d, len %d\n", sskip, stheend);
			printf("dst: skip %d, len %d\n", dskip, dtheend);
			printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
1433 | coffset, stheend, cpskip, cpoffset); |
1434 | } |
1435 | #endif |
1436 | } else { |
1437 | cpskip = dskip = sskip = macoffset + encoffset; |
		dtheend = stheend = (enccrd) ? enccrd->crd_len : maccrd->crd_len;
1439 | cpoffset = cpskip + dtheend; |
1440 | coffset = 0; |
1441 | } |
1442 | |
1443 | if (q->q_src_map == NULL) { |
1444 | /* XXX FIXME: jonathan asks, what the heck's that 0xfff0? */ |
1445 | if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER, |
1446 | 0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) { |
1447 | err = ENOMEM; |
1448 | goto errout; |
1449 | } |
1450 | } |
1451 | if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1452 | if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map, |
1453 | q->q_src_m, BUS_DMA_NOWAIT) != 0) { |
1454 | ubsecstats.hst_noload++; |
1455 | err = ENOMEM; |
1456 | goto errout; |
1457 | } |
1458 | } else if (crp->crp_flags & CRYPTO_F_IOV) { |
1459 | if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map, |
1460 | q->q_src_io, BUS_DMA_NOWAIT) != 0) { |
1461 | ubsecstats.hst_noload++; |
1462 | err = ENOMEM; |
1463 | goto errout; |
1464 | } |
1465 | } |
1466 | nicealign = ubsec_dmamap_aligned(q->q_src_map); |
1467 | |
1468 | dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend); |
1469 | |
1470 | #ifdef UBSEC_DEBUG |
1471 | if (ubsec_debug) |
		printf("src skip: %d nicealign: %u\n", sskip, nicealign);
1473 | #endif |
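	/*
	 * Build the input fragment chain: walk the source DMA segments,
	 * skip the first sskip bytes (data ahead of the region the chip
	 * processes), clamp the total to the stheend bytes wanted, and
	 * link the fragments through pb_next in chip byte order.
	 */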
1474 | for (i = j = 0; i < q->q_src_map->dm_nsegs; i++) { |
1475 | struct ubsec_pktbuf *pb; |
1476 | bus_size_t packl = q->q_src_map->dm_segs[i].ds_len; |
1477 | bus_addr_t packp = q->q_src_map->dm_segs[i].ds_addr; |
1478 | |
1479 | if (sskip >= packl) { |
1480 | sskip -= packl; |
1481 | continue; |
1482 | } |
1483 | |
1484 | packl -= sskip; |
1485 | packp += sskip; |
1486 | sskip = 0; |
1487 | |
1488 | if (packl > 0xfffc) { |
1489 | err = EIO; |
1490 | goto errout; |
1491 | } |
1492 | |
1493 | if (j == 0) |
1494 | pb = &dmap->d_dma->d_mcr.mcr_ipktbuf; |
1495 | else |
1496 | pb = &dmap->d_dma->d_sbuf[j - 1]; |
1497 | |
1498 | pb->pb_addr = htole32(packp); |
1499 | |
1500 | if (stheend) { |
1501 | if (packl > stheend) { |
1502 | pb->pb_len = htole32(stheend); |
1503 | stheend = 0; |
1504 | } else { |
1505 | pb->pb_len = htole32(packl); |
1506 | stheend -= packl; |
1507 | } |
1508 | } else |
1509 | pb->pb_len = htole32(packl); |
1510 | |
1511 | if ((i + 1) == q->q_src_map->dm_nsegs) |
1512 | pb->pb_next = 0; |
1513 | else |
1514 | pb->pb_next = htole32(dmap->d_alloc.dma_paddr + |
1515 | offsetof(struct ubsec_dmachunk, d_sbuf[j])); |
1516 | j++; |
1517 | } |
1518 | |
1519 | if (enccrd == NULL && maccrd != NULL) { |
1520 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0; |
1521 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0; |
1522 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr + |
1523 | offsetof(struct ubsec_dmachunk, d_macbuf[0])); |
1524 | #ifdef UBSEC_DEBUG |
1525 | if (ubsec_debug) |
			printf("opkt: %x %x %x\n",
1527 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr, |
1528 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_len, |
1529 | dmap->d_dma->d_mcr.mcr_opktbuf.pb_next); |
1530 | |
1531 | #endif |
1532 | } else { |
1533 | if (crp->crp_flags & CRYPTO_F_IOV) { |
1534 | if (!nicealign) { |
1535 | ubsecstats.hst_iovmisaligned++; |
1536 | err = EINVAL; |
1537 | goto errout; |
1538 | } |
1539 | if (q->q_dst_map == NULL) { |
1540 | if (q->q_cached_dst_map == NULL) { |
1541 | /* |
1542 | * XXX: ``what the heck's that'' |
1543 | * 0xfff0? |
1544 | */ |
1545 | if (bus_dmamap_create(sc->sc_dmat, |
1546 | 0xfff0, UBS_MAX_SCATTER, 0xfff0, 0, |
1547 | BUS_DMA_NOWAIT, |
1548 | &q->q_cached_dst_map) != 0) { |
1549 | ubsecstats.hst_nomap++; |
1550 | err = ENOMEM; |
1551 | goto errout; |
1552 | } |
1553 | } |
1554 | q->q_dst_map = q->q_cached_dst_map; |
1555 | } |
1556 | if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map, |
1557 | q->q_dst_io, BUS_DMA_NOWAIT) != 0) { |
1558 | ubsecstats.hst_noload++; |
1559 | err = ENOMEM; |
1560 | goto errout; |
1561 | } |
1562 | } else if (crp->crp_flags & CRYPTO_F_IMBUF) { |
1563 | if (nicealign) { |
1564 | q->q_dst_m = q->q_src_m; |
1565 | q->q_dst_map = q->q_src_map; |
1566 | } else { |
1567 | int totlen, len; |
1568 | struct mbuf *m, *top, **mp; |
1569 | |
1570 | ubsecstats.hst_unaligned++; |
1571 | totlen = q->q_src_map->dm_mapsize; |
1572 | if (q->q_src_m->m_flags & M_PKTHDR) { |
1573 | len = MHLEN; |
1574 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
1575 | /*XXX FIXME: m_dup_pkthdr */ |
1576 | if (m && 1 /*!m_dup_pkthdr(m, q->q_src_m, M_DONTWAIT)*/) { |
1577 | m_free(m); |
1578 | m = NULL; |
1579 | } |
1580 | } else { |
1581 | len = MLEN; |
1582 | MGET(m, M_DONTWAIT, MT_DATA); |
1583 | } |
1584 | if (m == NULL) { |
1585 | ubsecstats.hst_nombuf++; |
1586 | err = sc->sc_nqueue ? ERESTART : ENOMEM; |
1587 | goto errout; |
1588 | } |
1589 | if (len == MHLEN) |
1590 | /*XXX was M_DUP_PKTHDR*/ |
1591 | M_COPY_PKTHDR(m, q->q_src_m); |
1592 | if (totlen >= MINCLSIZE) { |
1593 | MCLGET(m, M_DONTWAIT); |
1594 | if ((m->m_flags & M_EXT) == 0) { |
1595 | m_free(m); |
1596 | ubsecstats.hst_nomcl++; |
1597 | err = sc->sc_nqueue |
1598 | ? ERESTART : ENOMEM; |
1599 | goto errout; |
1600 | } |
1601 | len = MCLBYTES; |
1602 | } |
1603 | m->m_len = len; |
1604 | top = NULL; |
				mp = &top;
1606 | |
1607 | while (totlen > 0) { |
1608 | if (top) { |
1609 | MGET(m, M_DONTWAIT, MT_DATA); |
1610 | if (m == NULL) { |
1611 | m_freem(top); |
1612 | ubsecstats.hst_nombuf++; |
1613 | err = sc->sc_nqueue ? ERESTART : ENOMEM; |
1614 | goto errout; |
1615 | } |
1616 | len = MLEN; |
1617 | } |
1618 | if (top && totlen >= MINCLSIZE) { |
1619 | MCLGET(m, M_DONTWAIT); |
1620 | if ((m->m_flags & M_EXT) == 0) { |
1621 | *mp = m; |
1622 | m_freem(top); |
1623 | ubsecstats.hst_nomcl++; |
1624 | err = sc->sc_nqueue ? ERESTART : ENOMEM; |
1625 | goto errout; |
1626 | } |
1627 | len = MCLBYTES; |
1628 | } |
1629 | m->m_len = len = min(totlen, len); |
1630 | totlen -= len; |
1631 | *mp = m; |
1632 | mp = &m->m_next; |
1633 | } |
1634 | q->q_dst_m = top; |
1635 | ubsec_mcopy(q->q_src_m, q->q_dst_m, |
1636 | cpskip, cpoffset); |
1637 | if (q->q_dst_map == NULL) { |
1638 | if (q->q_cached_dst_map == NULL) { |
1639 | /* XXX again, what the heck is that 0xfff0? */ |
1640 | if (bus_dmamap_create(sc->sc_dmat, 0xfff0, |
1641 | UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT, |
1642 | &q->q_cached_dst_map) != 0) { |
1643 | ubsecstats.hst_nomap++; |
1644 | err = ENOMEM; |
1645 | goto errout; |
1646 | } |
1647 | } |
1648 | q->q_dst_map = q->q_cached_dst_map; |
1649 | } |
1650 | if (bus_dmamap_load_mbuf(sc->sc_dmat, |
1651 | q->q_dst_map, q->q_dst_m, |
1652 | BUS_DMA_NOWAIT) != 0) { |
1653 | ubsecstats.hst_noload++; |
1654 | err = ENOMEM; |
1655 | goto errout; |
1656 | } |
1657 | } |
1658 | } else { |
1659 | ubsecstats.hst_badflags++; |
1660 | err = EINVAL; |
1661 | goto errout; |
1662 | } |
1663 | |
1664 | #ifdef UBSEC_DEBUG |
1665 | if (ubsec_debug) |
			printf("dst skip: %d\n", dskip);
1667 | #endif |
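		/*
		 * Build the output fragment chain the same way, skipping
		 * dskip bytes and clamping to dtheend; the last fragment
		 * chains to d_macbuf[] when a MAC is to be appended,
		 * otherwise it terminates the list.
		 */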
1668 | for (i = j = 0; i < q->q_dst_map->dm_nsegs; i++) { |
1669 | struct ubsec_pktbuf *pb; |
1670 | bus_size_t packl = q->q_dst_map->dm_segs[i].ds_len; |
1671 | bus_addr_t packp = q->q_dst_map->dm_segs[i].ds_addr; |
1672 | |
1673 | if (dskip >= packl) { |
1674 | dskip -= packl; |
1675 | continue; |
1676 | } |
1677 | |
1678 | packl -= dskip; |
1679 | packp += dskip; |
1680 | dskip = 0; |
1681 | |
1682 | if (packl > 0xfffc) { |
1683 | err = EIO; |
1684 | goto errout; |
1685 | } |
1686 | |
1687 | if (j == 0) |
1688 | pb = &dmap->d_dma->d_mcr.mcr_opktbuf; |
1689 | else |
1690 | pb = &dmap->d_dma->d_dbuf[j - 1]; |
1691 | |
1692 | pb->pb_addr = htole32(packp); |
1693 | |
1694 | if (dtheend) { |
1695 | if (packl > dtheend) { |
1696 | pb->pb_len = htole32(dtheend); |
1697 | dtheend = 0; |
1698 | } else { |
1699 | pb->pb_len = htole32(packl); |
1700 | dtheend -= packl; |
1701 | } |
1702 | } else |
1703 | pb->pb_len = htole32(packl); |
1704 | |
1705 | if ((i + 1) == q->q_dst_map->dm_nsegs) { |
1706 | if (maccrd) |
1707 | pb->pb_next = htole32(dmap->d_alloc.dma_paddr + |
1708 | offsetof(struct ubsec_dmachunk, d_macbuf[0])); |
1709 | else |
1710 | pb->pb_next = 0; |
1711 | } else |
1712 | pb->pb_next = htole32(dmap->d_alloc.dma_paddr + |
1713 | offsetof(struct ubsec_dmachunk, d_dbuf[j])); |
1714 | j++; |
1715 | } |
1716 | } |
1717 | |
1718 | dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr + |
1719 | offsetof(struct ubsec_dmachunk, d_ctx)); |
1720 | |
1721 | if (enccrd && enccrd->crd_alg == CRYPTO_AES_CBC) { |
1722 | struct ubsec_pktctx_aes128 *aes128; |
1723 | struct ubsec_pktctx_aes192 *aes192; |
1724 | struct ubsec_pktctx_aes256 *aes256; |
1725 | struct ubsec_pktctx_hdr *ph; |
1726 | u_int8_t *ctx; |
1727 | |
1728 | ctx = (u_int8_t *)(dmap->d_alloc.dma_vaddr) + |
1729 | offsetof(struct ubsec_dmachunk, d_ctx); |
1730 | |
1731 | ph = (struct ubsec_pktctx_hdr *)ctx; |
1732 | ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES); |
1733 | ph->ph_flags = flags; |
1734 | ph->ph_offset = htole16(coffset >> 2); |
1735 | |
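		/*
		 * The three AES context layouts differ only in the
		 * size of the key field; pick the variant matching
		 * the session key length.
		 */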
1736 | switch (enccrd->crd_klen) { |
1737 | case 128: |
1738 | aes128 = (struct ubsec_pktctx_aes128 *)ctx; |
1739 | ph->ph_len = htole16(sizeof(*aes128)); |
1740 | ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_128); |
1741 | for (i = 0; i < 4; i++) |
1742 | aes128->pc_aeskey[i] = key.ses_key[i]; |
1743 | for (i = 0; i < 5; i++) |
1744 | aes128->pc_hminner[i] = key.ses_hminner[i]; |
1745 | for (i = 0; i < 5; i++) |
1746 | aes128->pc_hmouter[i] = key.ses_hmouter[i]; |
1747 | for (i = 0; i < 4; i++) |
1748 | aes128->pc_iv[i] = key.ses_iv[i]; |
1749 | break; |
1750 | case 192: |
1751 | aes192 = (struct ubsec_pktctx_aes192 *)ctx; |
1752 | ph->ph_len = htole16(sizeof(*aes192)); |
1753 | ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_192); |
1754 | for (i = 0; i < 6; i++) |
1755 | aes192->pc_aeskey[i] = key.ses_key[i]; |
1756 | for (i = 0; i < 5; i++) |
1757 | aes192->pc_hminner[i] = key.ses_hminner[i]; |
1758 | for (i = 0; i < 5; i++) |
1759 | aes192->pc_hmouter[i] = key.ses_hmouter[i]; |
1760 | for (i = 0; i < 4; i++) |
1761 | aes192->pc_iv[i] = key.ses_iv[i]; |
1762 | break; |
1763 | case 256: |
1764 | aes256 = (struct ubsec_pktctx_aes256 *)ctx; |
1765 | ph->ph_len = htole16(sizeof(*aes256)); |
1766 | ph->ph_flags |= htole16(UBS_PKTCTX_KEYSIZE_256); |
1767 | for (i = 0; i < 8; i++) |
1768 | aes256->pc_aeskey[i] = key.ses_key[i]; |
1769 | for (i = 0; i < 5; i++) |
1770 | aes256->pc_hminner[i] = key.ses_hminner[i]; |
1771 | for (i = 0; i < 5; i++) |
1772 | aes256->pc_hmouter[i] = key.ses_hmouter[i]; |
1773 | for (i = 0; i < 4; i++) |
1774 | aes256->pc_iv[i] = key.ses_iv[i]; |
1775 | break; |
1776 | } |
1777 | } else if (sc->sc_flags & UBS_FLAGS_LONGCTX) { |
1778 | struct ubsec_pktctx_3des *ctx; |
1779 | struct ubsec_pktctx_hdr *ph; |
1780 | |
1781 | ctx = (struct ubsec_pktctx_3des *) |
1782 | ((u_int8_t *)(dmap->d_alloc.dma_vaddr) + |
1783 | offsetof(struct ubsec_dmachunk, d_ctx)); |
1784 | |
1785 | ph = (struct ubsec_pktctx_hdr *)ctx; |
1786 | ph->ph_len = htole16(sizeof(*ctx)); |
1787 | ph->ph_type = htole16(UBS_PKTCTX_TYPE_IPSEC_3DES); |
1788 | ph->ph_flags = flags; |
1789 | ph->ph_offset = htole16(coffset >> 2); |
1790 | |
1791 | for (i = 0; i < 6; i++) |
1792 | ctx->pc_deskey[i] = key.ses_key[i]; |
1793 | for (i = 0; i < 5; i++) |
1794 | ctx->pc_hminner[i] = key.ses_hminner[i]; |
1795 | for (i = 0; i < 5; i++) |
1796 | ctx->pc_hmouter[i] = key.ses_hmouter[i]; |
1797 | for (i = 0; i < 2; i++) |
1798 | ctx->pc_iv[i] = key.ses_iv[i]; |
1799 | } else { |
1800 | struct ubsec_pktctx *ctx = (struct ubsec_pktctx *) |
1801 | ((u_int8_t *)dmap->d_alloc.dma_vaddr + |
1802 | offsetof(struct ubsec_dmachunk, d_ctx)); |
1803 | |
1804 | ctx->pc_flags = flags; |
1805 | ctx->pc_offset = htole16(coffset >> 2); |
1806 | for (i = 0; i < 6; i++) |
1807 | ctx->pc_deskey[i] = key.ses_key[i]; |
1808 | for (i = 0; i < 5; i++) |
1809 | ctx->pc_hminner[i] = key.ses_hminner[i]; |
1810 | for (i = 0; i < 5; i++) |
1811 | ctx->pc_hmouter[i] = key.ses_hmouter[i]; |
1812 | for (i = 0; i < 2; i++) |
1813 | ctx->pc_iv[i] = key.ses_iv[i]; |
1814 | } |
1815 | |
1816 | mutex_spin_enter(&sc->sc_mtx); |
1817 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next); |
1818 | sc->sc_nqueue++; |
1819 | ubsecstats.hst_ipackets++; |
1820 | ubsecstats.hst_ibytes += dmap->d_alloc.dma_map->dm_mapsize; |
1821 | if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= ubsec_maxbatch) |
1822 | ubsec_feed(sc); |
1823 | mutex_spin_exit(&sc->sc_mtx); |
1824 | return (0); |
1825 | |
1826 | errout: |
1827 | if (q != NULL) { |
1828 | if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) |
1829 | m_freem(q->q_dst_m); |
1830 | |
1831 | if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { |
1832 | bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); |
1833 | } |
1834 | if (q->q_src_map != NULL) { |
1835 | bus_dmamap_unload(sc->sc_dmat, q->q_src_map); |
1836 | } |
1837 | |
1838 | mutex_spin_enter(&sc->sc_mtx); |
1839 | SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); |
1840 | mutex_spin_exit(&sc->sc_mtx); |
1841 | } |
1842 | #if 0 /* jonathan says: this openbsd code seems to be subsumed elsewhere */ |
1843 | if (err == EINVAL) |
1844 | ubsecstats.hst_invalid++; |
1845 | else |
1846 | ubsecstats.hst_nomem++; |
1847 | #endif |
1848 | if (err != ERESTART) { |
1849 | crp->crp_etype = err; |
1850 | crypto_done(crp); |
1851 | } else { |
1852 | sc->sc_needwakeup |= CRYPTO_SYMQ; |
1853 | } |
1854 | return (err); |
1855 | } |
1856 | |
1857 | static void |
1858 | ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q) |
1859 | { |
1860 | struct cryptop *crp = (struct cryptop *)q->q_crp; |
1861 | struct cryptodesc *crd; |
1862 | struct ubsec_dma *dmap = q->q_dma; |
1863 | |
1864 | ubsecstats.hst_opackets++; |
1865 | ubsecstats.hst_obytes += dmap->d_alloc.dma_size; |
1866 | |
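	/*
	 * Sync the DMA maps so the CPU sees the chip's results
	 * before we start copying out of the buffers.
	 */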
1867 | bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0, |
1868 | dmap->d_alloc.dma_map->dm_mapsize, |
1869 | BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); |
1870 | if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) { |
1871 | bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, |
1872 | 0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
1873 | bus_dmamap_unload(sc->sc_dmat, q->q_dst_map); |
1874 | } |
1875 | bus_dmamap_sync(sc->sc_dmat, q->q_src_map, |
1876 | 0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
1877 | bus_dmamap_unload(sc->sc_dmat, q->q_src_map); |
1878 | |
1879 | if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) { |
1880 | m_freem(q->q_src_m); |
1881 | crp->crp_buf = (void *)q->q_dst_m; |
1882 | } |
1883 | |
1884 | /* copy out IV for future use */ |
1885 | if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) { |
1886 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { |
1887 | if (crd->crd_alg != CRYPTO_DES_CBC && |
1888 | crd->crd_alg != CRYPTO_3DES_CBC && |
1889 | crd->crd_alg != CRYPTO_AES_CBC) |
1890 | continue; |
1891 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
1892 | m_copydata((struct mbuf *)crp->crp_buf, |
1893 | crd->crd_skip + crd->crd_len - 8, 8, |
1894 | (void *)sc->sc_sessions[q->q_sesn].ses_iv); |
1895 | else if (crp->crp_flags & CRYPTO_F_IOV) { |
1896 | cuio_copydata((struct uio *)crp->crp_buf, |
1897 | crd->crd_skip + crd->crd_len - 8, 8, |
1898 | (void *)sc->sc_sessions[q->q_sesn].ses_iv); |
1899 | } |
1900 | break; |
1901 | } |
1902 | } |
1903 | |
1904 | for (crd = crp->crp_desc; crd; crd = crd->crd_next) { |
1905 | if (crd->crd_alg != CRYPTO_MD5_HMAC_96 && |
1906 | crd->crd_alg != CRYPTO_SHA1_HMAC_96) |
1907 | continue; |
1908 | if (crp->crp_flags & CRYPTO_F_IMBUF) |
1909 | m_copyback((struct mbuf *)crp->crp_buf, |
1910 | crd->crd_inject, 12, |
1911 | (void *)dmap->d_dma->d_macbuf); |
1912 | else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) |
1913 | bcopy((void *)dmap->d_dma->d_macbuf, |
1914 | crp->crp_mac, 12); |
1915 | break; |
1916 | } |
1917 | SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); |
1918 | crypto_done(crp); |
1919 | } |
1920 | |
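/*
 * Copy bytes from the source mbuf chain to the destination chain,
 * skipping the [hoffset, toffset) byte range in the middle, which
 * the chip fills in with its own output.
 */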
1921 | static void |
1922 | ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset) |
1923 | { |
1924 | int i, j, dlen, slen; |
1925 | char *dptr, *sptr; |
1926 | |
1927 | j = 0; |
1928 | sptr = srcm->m_data; |
1929 | slen = srcm->m_len; |
1930 | dptr = dstm->m_data; |
1931 | dlen = dstm->m_len; |
1932 | |
1933 | while (1) { |
1934 | for (i = 0; i < min(slen, dlen); i++) { |
1935 | if (j < hoffset || j >= toffset) |
1936 | *dptr++ = *sptr++; |
1937 | slen--; |
1938 | dlen--; |
1939 | j++; |
1940 | } |
1941 | if (slen == 0) { |
1942 | srcm = srcm->m_next; |
1943 | if (srcm == NULL) |
1944 | return; |
1945 | sptr = srcm->m_data; |
1946 | slen = srcm->m_len; |
1947 | } |
1948 | if (dlen == 0) { |
1949 | dstm = dstm->m_next; |
1950 | if (dstm == NULL) |
1951 | return; |
1952 | dptr = dstm->m_data; |
1953 | dlen = dstm->m_len; |
1954 | } |
1955 | } |
1956 | } |
1957 | |
1958 | /* |
1959 | * feed the key generator, must be called at splnet() or higher. |
1960 | */ |
1961 | static void |
1962 | ubsec_feed2(struct ubsec_softc *sc) |
1963 | { |
1964 | struct ubsec_q2 *q; |
1965 | |
1966 | while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) { |
1967 | if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL) |
1968 | break; |
1969 | q = SIMPLEQ_FIRST(&sc->sc_queue2); |
1970 | |
1971 | bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, |
1972 | q->q_mcr.dma_map->dm_mapsize, |
1973 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
1974 | bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, |
1975 | q->q_ctx.dma_map->dm_mapsize, |
1976 | BUS_DMASYNC_PREWRITE); |
1977 | |
		WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next);
1981 | --sc->sc_nqueue2; |
1982 | SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next); |
1983 | } |
1984 | } |
1985 | |
1986 | /* |
1987 | * feed the RNG (used instead of ubsec_feed2() on 5827+ devices) |
1988 | */ |
1989 | void |
1990 | ubsec_feed4(struct ubsec_softc *sc) |
1991 | { |
1992 | struct ubsec_q2 *q; |
1993 | |
1994 | while (!SIMPLEQ_EMPTY(&sc->sc_queue4)) { |
1995 | if (READ_REG(sc, BS_STAT) & BS_STAT_MCR4_FULL) |
1996 | break; |
1997 | q = SIMPLEQ_FIRST(&sc->sc_queue4); |
1998 | |
1999 | bus_dmamap_sync(sc->sc_dmat, q->q_mcr.dma_map, 0, |
2000 | q->q_mcr.dma_map->dm_mapsize, |
2001 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
2002 | bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, |
2003 | q->q_ctx.dma_map->dm_mapsize, |
2004 | BUS_DMASYNC_PREWRITE); |
2005 | |
2006 | WRITE_REG(sc, BS_MCR4, q->q_mcr.dma_paddr); |
2007 | SIMPLEQ_REMOVE_HEAD(&sc->sc_queue4, q_next); |
2008 | --sc->sc_nqueue4; |
2009 | SIMPLEQ_INSERT_TAIL(&sc->sc_qchip4, q, q_next); |
2010 | } |
2011 | } |
2012 | |
2013 | /* |
2014 | * Callback for handling random numbers |
2015 | */ |
2016 | static void |
2017 | ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q) |
2018 | { |
2019 | struct cryptkop *krp; |
2020 | struct ubsec_ctx_keyop *ctx; |
2021 | |
2022 | ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr; |
2023 | bus_dmamap_sync(sc->sc_dmat, q->q_ctx.dma_map, 0, |
2024 | q->q_ctx.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2025 | |
2026 | switch (q->q_type) { |
2027 | #ifndef UBSEC_NO_RNG |
2028 | case UBS_CTXOP_RNGSHA1: |
2029 | case UBS_CTXOP_RNGBYPASS: { |
2030 | struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q; |
2031 | u_int32_t *p; |
2032 | int i; |
2033 | |
2034 | bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, |
2035 | rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2036 | p = (u_int32_t *)rng->rng_buf.dma_vaddr; |
2037 | #ifndef __NetBSD__ |
2038 | for (i = 0; i < UBSEC_RNG_BUFSIZ; p++, i++) |
2039 | add_true_randomness(letoh32(*p)); |
2040 | #else |
2041 | i = UBSEC_RNG_BUFSIZ * sizeof(u_int32_t); |
2042 | rnd_add_data(&sc->sc_rnd_source, (char *)p, i, i * NBBY); |
2043 | sc->sc_rng_need -= i; |
2044 | #endif |
2045 | rng->rng_used = 0; |
2046 | #ifdef __OpenBSD__ |
2047 | timeout_add(&sc->sc_rngto, sc->sc_rnghz); |
2048 | #else |
2049 | if (sc->sc_rng_need > 0) { |
2050 | callout_schedule(&sc->sc_rngto, sc->sc_rnghz); |
2051 | } |
2052 | #endif |
2053 | break; |
2054 | } |
2055 | #endif |
2056 | case UBS_CTXOP_MODEXP: { |
2057 | struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; |
2058 | u_int rlen, clen; |
2059 | |
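		/*
		 * rlen is the modulus size in bytes; clen is the size
		 * of the caller's result parameter.  The result cannot
		 * be delivered if the parameter is too small.
		 */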
2060 | krp = me->me_krp; |
2061 | rlen = (me->me_modbits + 7) / 8; |
2062 | clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8; |
2063 | |
2064 | bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, |
2065 | 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2066 | bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, |
2067 | 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2068 | bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, |
2069 | 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2070 | bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, |
2071 | 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2072 | |
2073 | if (clen < rlen) |
2074 | krp->krp_status = E2BIG; |
2075 | else { |
2076 | if (sc->sc_flags & UBS_FLAGS_HWNORM) { |
2077 | memset(krp->krp_param[krp->krp_iparams].crp_p, 0, |
2078 | (krp->krp_param[krp->krp_iparams].crp_nbits |
2079 | + 7) / 8); |
2080 | bcopy(me->me_C.dma_vaddr, |
2081 | krp->krp_param[krp->krp_iparams].crp_p, |
2082 | (me->me_modbits + 7) / 8); |
2083 | } else |
2084 | ubsec_kshift_l(me->me_shiftbits, |
2085 | me->me_C.dma_vaddr, me->me_normbits, |
2086 | krp->krp_param[krp->krp_iparams].crp_p, |
2087 | krp->krp_param[krp->krp_iparams].crp_nbits); |
2088 | } |
2089 | |
2090 | crypto_kdone(krp); |
2091 | |
2092 | /* bzero all potentially sensitive data */ |
2093 | memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); |
2094 | memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); |
2095 | memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); |
2096 | memset(me->me_q.q_ctx.dma_vaddr, 0, me->me_q.q_ctx.dma_size); |
2097 | |
2098 | /* Can't free here, so put us on the free list. */ |
2099 | SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next); |
2100 | break; |
2101 | } |
2102 | case UBS_CTXOP_RSAPRIV: { |
2103 | struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; |
2104 | u_int len; |
2105 | |
2106 | krp = rp->rpr_krp; |
2107 | bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, 0, |
2108 | rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); |
2109 | bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, 0, |
2110 | rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); |
2111 | |
2112 | len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) |
2113 | / 8; |
2114 | bcopy(rp->rpr_msgout.dma_vaddr, |
2115 | krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len); |
2116 | |
2117 | crypto_kdone(krp); |
2118 | |
2119 | memset(rp->rpr_msgin.dma_vaddr, 0, rp->rpr_msgin.dma_size); |
2120 | memset(rp->rpr_msgout.dma_vaddr, 0, rp->rpr_msgout.dma_size); |
2121 | memset(rp->rpr_q.q_ctx.dma_vaddr, 0, rp->rpr_q.q_ctx.dma_size); |
2122 | |
2123 | /* Can't free here, so put us on the free list. */ |
2124 | SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next); |
2125 | break; |
2126 | } |
2127 | default: |
2128 | printf("%s: unknown ctx op: %x\n" , device_xname(sc->sc_dev), |
2129 | letoh16(ctx->ctx_op)); |
2130 | break; |
2131 | } |
2132 | } |
2133 | |
2134 | #ifndef UBSEC_NO_RNG |
2135 | |
2136 | static void |
2137 | ubsec_rng_get(size_t bytes, void *vsc) |
2138 | { |
2139 | struct ubsec_softc *sc = vsc; |
2140 | |
2141 | mutex_spin_enter(&sc->sc_mtx); |
2142 | sc->sc_rng_need = bytes; |
2143 | ubsec_rng_locked(sc); |
	mutex_spin_exit(&sc->sc_mtx);
}
2147 | |
2148 | static void |
2149 | ubsec_rng(void *vsc) |
2150 | { |
	struct ubsec_softc *sc = vsc;

	mutex_spin_enter(&sc->sc_mtx);
2153 | ubsec_rng_locked(sc); |
2154 | mutex_spin_exit(&sc->sc_mtx); |
2155 | } |
2156 | |
2157 | static void |
2158 | ubsec_rng_locked(void *vsc) |
2159 | { |
2160 | struct ubsec_softc *sc = vsc; |
2161 | struct ubsec_q2_rng *rng = &sc->sc_rng; |
2162 | struct ubsec_mcr *mcr; |
2163 | struct ubsec_ctx_rngbypass *ctx; |
2164 | int *nqueue; |
2165 | |
	/* The caller is responsible for locking and releasing sc_mtx. */
2167 | KASSERT(mutex_owned(&sc->sc_mtx)); |
2168 | |
2169 | if (rng->rng_used) { |
2170 | return; |
2171 | } |
2172 | |
2173 | if (sc->sc_rng_need < 1) { |
2174 | callout_stop(&sc->sc_rngto); |
2175 | return; |
2176 | } |
2177 | |
2178 | if (sc->sc_flags & UBS_FLAGS_RNG4) |
2179 | nqueue = &sc->sc_nqueue4; |
2180 | else |
2181 | nqueue = &sc->sc_nqueue2; |
2182 | |
2183 | (*nqueue)++; |
2184 | if (*nqueue >= UBS_MAX_NQUEUE) |
2185 | goto out; |
2186 | |
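	/*
	 * Build a single-packet MCR with no input buffer; the chip
	 * writes UBSEC_RNG_BUFSIZ 32-bit words of output into rng_buf.
	 */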
2187 | mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr; |
2188 | ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr; |
2189 | |
2190 | mcr->mcr_pkts = htole16(1); |
2191 | mcr->mcr_flags = 0; |
2192 | mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr); |
2193 | mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0; |
2194 | mcr->mcr_ipktbuf.pb_len = 0; |
2195 | mcr->mcr_reserved = mcr->mcr_pktlen = 0; |
2196 | mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr); |
2197 | mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) & |
2198 | UBS_PKTBUF_LEN); |
2199 | mcr->mcr_opktbuf.pb_next = 0; |
2200 | |
2201 | ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass)); |
2202 | ctx->rbp_op = htole16(UBS_CTXOP_RNGSHA1); |
2203 | rng->rng_q.q_type = UBS_CTXOP_RNGSHA1; |
2204 | |
2205 | bus_dmamap_sync(sc->sc_dmat, rng->rng_buf.dma_map, 0, |
2206 | rng->rng_buf.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
2207 | |
2208 | if (sc->sc_flags & UBS_FLAGS_RNG4) { |
2209 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue4, &rng->rng_q, q_next); |
2210 | ubsec_feed4(sc); |
2211 | } else { |
2212 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next); |
2213 | ubsec_feed2(sc); |
2214 | } |
2215 | rng->rng_used = 1; |
2216 | ubsecstats.hst_rng++; |
2217 | |
2218 | return; |
2219 | |
2220 | out: |
2221 | /* |
2222 | * Something weird happened, generate our own call back. |
2223 | */ |
2224 | (*nqueue)--; |
2225 | #ifdef __OpenBSD__ |
2226 | timeout_add(&sc->sc_rngto, sc->sc_rnghz); |
2227 | #else |
2228 | callout_schedule(&sc->sc_rngto, sc->sc_rnghz); |
2229 | #endif |
2230 | } |
2231 | #endif /* UBSEC_NO_RNG */ |
2232 | |
2233 | static int |
2234 | ubsec_dma_malloc(struct ubsec_softc *sc, bus_size_t size, |
    struct ubsec_dma_alloc *dma, int mapflags)
2236 | { |
2237 | int r; |
2238 | |
2239 | if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, |
2240 | &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) != 0) |
2241 | goto fail_0; |
2242 | |
2243 | if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, |
2244 | size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) |
2245 | goto fail_1; |
2246 | |
2247 | if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, |
2248 | BUS_DMA_NOWAIT, &dma->dma_map)) != 0) |
2249 | goto fail_2; |
2250 | |
2251 | if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, |
2252 | size, NULL, BUS_DMA_NOWAIT)) != 0) |
2253 | goto fail_3; |
2254 | |
2255 | dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; |
2256 | dma->dma_size = size; |
2257 | return (0); |
2258 | |
2259 | fail_3: |
2260 | bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); |
2261 | fail_2: |
2262 | bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); |
2263 | fail_1: |
2264 | bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); |
2265 | fail_0: |
2266 | dma->dma_map = NULL; |
2267 | return (r); |
2268 | } |
2269 | |
2270 | static void |
2271 | ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma) |
2272 | { |
2273 | bus_dmamap_unload(sc->sc_dmat, dma->dma_map); |
2274 | bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_size); |
2275 | bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); |
2276 | bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); |
2277 | } |
2278 | |
2279 | /* |
2280 | * Resets the board. Values in the regesters are left as is |
2281 | * from the reset (i.e. initial values are assigned elsewhere). |
2282 | */ |
2283 | static void |
2284 | ubsec_reset_board(struct ubsec_softc *sc) |
2285 | { |
2286 | volatile u_int32_t ctrl; |
2287 | |
2288 | ctrl = READ_REG(sc, BS_CTRL); |
2289 | ctrl |= BS_CTRL_RESET; |
2290 | WRITE_REG(sc, BS_CTRL, ctrl); |
2291 | |
2292 | /* |
2293 | * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us |
2294 | */ |
2295 | DELAY(10); |
2296 | |
2297 | /* Enable RNG and interrupts on newer devices */ |
2298 | if (sc->sc_flags & UBS_FLAGS_MULTIMCR) { |
2299 | #ifndef UBSEC_NO_RNG |
2300 | WRITE_REG(sc, BS_CFG, BS_CFG_RNG); |
2301 | #endif |
2302 | WRITE_REG(sc, BS_INT, BS_INT_DMAINT); |
2303 | } |
2304 | } |
2305 | |
2306 | /* |
2307 | * Init Broadcom registers |
2308 | */ |
2309 | static void |
2310 | ubsec_init_board(struct ubsec_softc *sc) |
2311 | { |
2312 | u_int32_t ctrl; |
2313 | |
2314 | ctrl = READ_REG(sc, BS_CTRL); |
2315 | ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64); |
2316 | ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT; |
2317 | |
2318 | /* |
2319 | * XXX: Sam Leffler's code has (UBS_FLAGS_KEY|UBS_FLAGS_RNG)). |
2320 | * anyone got hw docs? |
2321 | */ |
2322 | if (sc->sc_flags & UBS_FLAGS_KEY) |
2323 | ctrl |= BS_CTRL_MCR2INT; |
2324 | else |
2325 | ctrl &= ~BS_CTRL_MCR2INT; |
2326 | |
2327 | if (sc->sc_flags & UBS_FLAGS_HWNORM) |
2328 | ctrl &= ~BS_CTRL_SWNORM; |
2329 | |
2330 | if (sc->sc_flags & UBS_FLAGS_MULTIMCR) { |
2331 | ctrl |= BS_CTRL_BSIZE240; |
2332 | ctrl &= ~BS_CTRL_MCR3INT; /* MCR3 is reserved for SSL */ |
2333 | |
2334 | if (sc->sc_flags & UBS_FLAGS_RNG4) |
2335 | ctrl |= BS_CTRL_MCR4INT; |
2336 | else |
2337 | ctrl &= ~BS_CTRL_MCR4INT; |
2338 | } |
2339 | |
2340 | WRITE_REG(sc, BS_CTRL, ctrl); |
2341 | } |
2342 | |
2343 | /* |
2344 | * Init Broadcom PCI registers |
2345 | */ |
2346 | static void |
2347 | ubsec_init_pciregs(struct pci_attach_args *pa) |
2348 | { |
2349 | pci_chipset_tag_t pc = pa->pa_pc; |
2350 | u_int32_t misc; |
2351 | |
2352 | /* |
2353 | * This will set the cache line size to 1, this will |
2354 | * force the BCM58xx chip just to do burst read/writes. |
2355 | * Cache line read/writes are to slow |
2356 | */ |
2357 | misc = pci_conf_read(pc, pa->pa_tag, PCI_BHLC_REG); |
2358 | misc = (misc & ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT)) |
2359 | | ((UBS_DEF_CACHELINE & 0xff) << PCI_CACHELINE_SHIFT); |
2360 | pci_conf_write(pc, pa->pa_tag, PCI_BHLC_REG, misc); |
2361 | } |
2362 | |
2363 | /* |
2364 | * Clean up after a chip crash. |
2365 | * It is assumed that the caller in splnet() |
2366 | */ |
2367 | static void |
2368 | ubsec_cleanchip(struct ubsec_softc *sc) |
2369 | { |
2370 | struct ubsec_q *q; |
2371 | |
2372 | while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) { |
2373 | q = SIMPLEQ_FIRST(&sc->sc_qchip); |
2374 | SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, /*q,*/ q_next); |
2375 | ubsec_free_q(sc, q); |
2376 | } |
2377 | sc->sc_nqchip = 0; |
2378 | } |
2379 | |
2380 | /* |
2381 | * free a ubsec_q |
2382 | * It is assumed that the caller is within splnet() |
2383 | */ |
2384 | static int |
2385 | ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q) |
2386 | { |
2387 | struct ubsec_q *q2; |
2388 | struct cryptop *crp; |
2389 | int npkts; |
2390 | int i; |
2391 | |
2392 | npkts = q->q_nstacked_mcrs; |
2393 | |
2394 | for (i = 0; i < npkts; i++) { |
		if (q->q_stacked_mcr[i]) {
2396 | q2 = q->q_stacked_mcr[i]; |
2397 | |
2398 | if ((q2->q_dst_m != NULL) |
2399 | && (q2->q_src_m != q2->q_dst_m)) |
2400 | m_freem(q2->q_dst_m); |
2401 | |
2402 | crp = (struct cryptop *)q2->q_crp; |
2403 | |
2404 | SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next); |
2405 | |
2406 | crp->crp_etype = EFAULT; |
2407 | crypto_done(crp); |
2408 | } else { |
2409 | break; |
2410 | } |
2411 | } |
2412 | |
2413 | /* |
2414 | * Free header MCR |
2415 | */ |
2416 | if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m)) |
2417 | m_freem(q->q_dst_m); |
2418 | |
2419 | crp = (struct cryptop *)q->q_crp; |
2420 | |
2421 | SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next); |
2422 | |
2423 | crp->crp_etype = EFAULT; |
2424 | crypto_done(crp); |
	return (0);
2426 | } |
2427 | |
2428 | /* |
2429 | * Routine to reset the chip and clean up. |
2430 | * It is assumed that the caller is in splnet() |
2431 | */ |
2432 | static void |
2433 | ubsec_totalreset(struct ubsec_softc *sc) |
2434 | { |
2435 | ubsec_reset_board(sc); |
2436 | ubsec_init_board(sc); |
2437 | ubsec_cleanchip(sc); |
2438 | } |
2439 | |
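/*
 * Check that a DMA map is usable by the chip: segment addresses
 * must be 32-bit aligned, and every segment except the last must
 * be a multiple of 4 bytes long.
 */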
2440 | static int |
2441 | ubsec_dmamap_aligned(bus_dmamap_t map) |
2442 | { |
2443 | int i; |
2444 | |
2445 | for (i = 0; i < map->dm_nsegs; i++) { |
2446 | if (map->dm_segs[i].ds_addr & 3) |
2447 | return (0); |
2448 | if ((i != (map->dm_nsegs - 1)) && |
2449 | (map->dm_segs[i].ds_len & 3)) |
2450 | return (0); |
2451 | } |
2452 | return (1); |
2453 | } |
2454 | |
2455 | #ifdef __OpenBSD__ |
2456 | struct ubsec_softc * |
2457 | ubsec_kfind(struct cryptkop *krp) |
2458 | { |
2459 | struct ubsec_softc *sc; |
2460 | int i; |
2461 | |
2462 | for (i = 0; i < ubsec_cd.cd_ndevs; i++) { |
2463 | sc = ubsec_cd.cd_devs[i]; |
2464 | if (sc == NULL) |
2465 | continue; |
2466 | if (sc->sc_cid == krp->krp_hid) |
2467 | return (sc); |
2468 | } |
2469 | return (NULL); |
2470 | } |
2471 | #endif |
2472 | |
2473 | static void |
2474 | ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q) |
2475 | { |
2476 | switch (q->q_type) { |
2477 | case UBS_CTXOP_MODEXP: { |
2478 | struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q; |
2479 | |
2480 | ubsec_dma_free(sc, &me->me_q.q_mcr); |
2481 | ubsec_dma_free(sc, &me->me_q.q_ctx); |
2482 | ubsec_dma_free(sc, &me->me_M); |
2483 | ubsec_dma_free(sc, &me->me_E); |
2484 | ubsec_dma_free(sc, &me->me_C); |
2485 | ubsec_dma_free(sc, &me->me_epb); |
2486 | free(me, M_DEVBUF); |
2487 | break; |
2488 | } |
2489 | case UBS_CTXOP_RSAPRIV: { |
2490 | struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q; |
2491 | |
2492 | ubsec_dma_free(sc, &rp->rpr_q.q_mcr); |
2493 | ubsec_dma_free(sc, &rp->rpr_q.q_ctx); |
2494 | ubsec_dma_free(sc, &rp->rpr_msgin); |
2495 | ubsec_dma_free(sc, &rp->rpr_msgout); |
2496 | free(rp, M_DEVBUF); |
2497 | break; |
2498 | } |
2499 | default: |
2500 | printf("%s: invalid kfree 0x%x\n" , device_xname(sc->sc_dev), |
2501 | q->q_type); |
2502 | break; |
2503 | } |
2504 | } |
2505 | |
2506 | static int |
2507 | ubsec_kprocess(void *arg, struct cryptkop *krp, int hint) |
2508 | { |
2509 | struct ubsec_softc *sc; |
2510 | int r; |
2511 | |
2512 | if (krp == NULL || krp->krp_callback == NULL) |
2513 | return (EINVAL); |
2514 | #ifdef __OpenBSD__ |
2515 | if ((sc = ubsec_kfind(krp)) == NULL) |
2516 | return (EINVAL); |
2517 | #else |
2518 | sc = arg; |
2519 | KASSERT(sc != NULL /*, ("ubsec_kprocess: null softc")*/); |
2520 | #endif |
2521 | |
2522 | while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) { |
2523 | struct ubsec_q2 *q; |
2524 | |
2525 | q = SIMPLEQ_FIRST(&sc->sc_q2free); |
2526 | SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, /*q,*/ q_next); |
2527 | ubsec_kfree(sc, q); |
2528 | } |
2529 | |
2530 | switch (krp->krp_op) { |
2531 | case CRK_MOD_EXP: |
2532 | if (sc->sc_flags & UBS_FLAGS_HWNORM) |
2533 | r = ubsec_kprocess_modexp_hw(sc, krp, hint); |
2534 | else |
2535 | r = ubsec_kprocess_modexp_sw(sc, krp, hint); |
2536 | break; |
2537 | case CRK_MOD_EXP_CRT: |
2538 | r = ubsec_kprocess_rsapriv(sc, krp, hint); |
2539 | break; |
2540 | default: |
2541 | printf("%s: kprocess: invalid op 0x%x\n" , |
2542 | device_xname(sc->sc_dev), krp->krp_op); |
2543 | krp->krp_status = EOPNOTSUPP; |
2544 | crypto_kdone(krp); |
2545 | r = 0; |
2546 | } |
2547 | return (r); |
2548 | } |
2549 | |
2550 | /* |
2551 | * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization) |
2552 | */ |
2553 | static int |
2554 | ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, |
2555 | int hint) |
2556 | { |
2557 | struct ubsec_q2_modexp *me; |
2558 | struct ubsec_mcr *mcr; |
2559 | struct ubsec_ctx_modexp *ctx; |
2560 | struct ubsec_pktbuf *epb; |
2561 | int err = 0; |
2562 | u_int nbits, normbits, mbits, shiftbits, ebits; |
2563 | |
2564 | me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); |
2565 | if (me == NULL) { |
2566 | err = ENOMEM; |
2567 | goto errout; |
2568 | } |
2569 | memset(me, 0, sizeof *me); |
2570 | me->me_krp = krp; |
2571 | me->me_q.q_type = UBS_CTXOP_MODEXP; |
2572 | |
2573 | nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); |
2574 | if (nbits <= 512) |
2575 | normbits = 512; |
2576 | else if (nbits <= 768) |
2577 | normbits = 768; |
2578 | else if (nbits <= 1024) |
2579 | normbits = 1024; |
2580 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) |
2581 | normbits = 1536; |
2582 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) |
2583 | normbits = 2048; |
2584 | else { |
2585 | err = E2BIG; |
2586 | goto errout; |
2587 | } |
2588 | |
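	/*
	 * With software normalization the operands are shifted up by
	 * shiftbits so that the most significant bit of the modulus
	 * lands on the normalized-size boundary; the result is shifted
	 * back down in ubsec_callback2().
	 */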
2589 | shiftbits = normbits - nbits; |
2590 | |
2591 | me->me_modbits = nbits; |
2592 | me->me_shiftbits = shiftbits; |
2593 | me->me_normbits = normbits; |
2594 | |
2595 | /* Sanity check: result bits must be >= true modulus bits. */ |
2596 | if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { |
2597 | err = ERANGE; |
2598 | goto errout; |
2599 | } |
2600 | |
2601 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), |
2602 | &me->me_q.q_mcr, 0)) { |
2603 | err = ENOMEM; |
2604 | goto errout; |
2605 | } |
2606 | mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; |
2607 | |
2608 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), |
2609 | &me->me_q.q_ctx, 0)) { |
2610 | err = ENOMEM; |
2611 | goto errout; |
2612 | } |
2613 | |
2614 | mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); |
2615 | if (mbits > nbits) { |
2616 | err = E2BIG; |
2617 | goto errout; |
2618 | } |
2619 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { |
2620 | err = ENOMEM; |
2621 | goto errout; |
2622 | } |
2623 | ubsec_kshift_r(shiftbits, |
2624 | krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits, |
2625 | me->me_M.dma_vaddr, normbits); |
2626 | |
2627 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { |
2628 | err = ENOMEM; |
2629 | goto errout; |
2630 | } |
2631 | memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); |
2632 | |
2633 | ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); |
2634 | if (ebits > nbits) { |
2635 | err = E2BIG; |
2636 | goto errout; |
2637 | } |
2638 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { |
2639 | err = ENOMEM; |
2640 | goto errout; |
2641 | } |
2642 | ubsec_kshift_r(shiftbits, |
2643 | krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits, |
2644 | me->me_E.dma_vaddr, normbits); |
2645 | |
2646 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), |
2647 | &me->me_epb, 0)) { |
2648 | err = ENOMEM; |
2649 | goto errout; |
2650 | } |
2651 | epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; |
2652 | epb->pb_addr = htole32(me->me_E.dma_paddr); |
2653 | epb->pb_next = 0; |
2654 | epb->pb_len = htole32(normbits / 8); |
2655 | |
2656 | #ifdef UBSEC_DEBUG |
2657 | if (ubsec_debug) { |
2658 | printf("Epb " ); |
2659 | ubsec_dump_pb(epb); |
2660 | } |
2661 | #endif |
2662 | |
2663 | mcr->mcr_pkts = htole16(1); |
2664 | mcr->mcr_flags = 0; |
2665 | mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); |
2666 | mcr->mcr_reserved = 0; |
2667 | mcr->mcr_pktlen = 0; |
2668 | |
2669 | mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); |
2670 | mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); |
2671 | mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); |
2672 | |
2673 | mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); |
2674 | mcr->mcr_opktbuf.pb_next = 0; |
2675 | mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); |
2676 | |
2677 | #ifdef DIAGNOSTIC |
2678 | /* Misaligned output buffer will hang the chip. */ |
2679 | if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) |
2680 | panic("%s: modexp invalid addr 0x%x" , device_xname(sc->sc_dev), |
2681 | letoh32(mcr->mcr_opktbuf.pb_addr)); |
2682 | if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) |
2683 | panic("%s: modexp invalid len 0x%x" , device_xname(sc->sc_dev), |
2684 | letoh32(mcr->mcr_opktbuf.pb_len)); |
2685 | #endif |
2686 | |
2687 | ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; |
2688 | memset(ctx, 0, sizeof(*ctx)); |
2689 | ubsec_kshift_r(shiftbits, |
2690 | krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits, |
2691 | ctx->me_N, normbits); |
2692 | ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); |
2693 | ctx->me_op = htole16(UBS_CTXOP_MODEXP); |
2694 | ctx->me_E_len = htole16(nbits); |
2695 | ctx->me_N_len = htole16(nbits); |
2696 | |
2697 | #ifdef UBSEC_DEBUG |
2698 | if (ubsec_debug) { |
2699 | ubsec_dump_mcr(mcr); |
2700 | ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); |
2701 | } |
2702 | #endif |
2703 | |
2704 | /* |
2705 | * ubsec_feed2 will sync mcr and ctx, we just need to sync |
2706 | * everything else. |
2707 | */ |
2708 | bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, |
2709 | 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2710 | bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, |
2711 | 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2712 | bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, |
2713 | 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
2714 | bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, |
2715 | 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2716 | |
2717 | /* Enqueue and we're done... */ |
2718 | mutex_spin_enter(&sc->sc_mtx); |
2719 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); |
2720 | ubsec_feed2(sc); |
2721 | ubsecstats.hst_modexp++; |
2722 | mutex_spin_exit(&sc->sc_mtx); |
2723 | |
2724 | return (0); |
2725 | |
2726 | errout: |
2727 | if (me != NULL) { |
2728 | if (me->me_q.q_mcr.dma_map != NULL) |
2729 | ubsec_dma_free(sc, &me->me_q.q_mcr); |
2730 | if (me->me_q.q_ctx.dma_map != NULL) { |
2731 | memset(me->me_q.q_ctx.dma_vaddr, 0, |
2732 | me->me_q.q_ctx.dma_size); |
2733 | ubsec_dma_free(sc, &me->me_q.q_ctx); |
2734 | } |
2735 | if (me->me_M.dma_map != NULL) { |
2736 | memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); |
2737 | ubsec_dma_free(sc, &me->me_M); |
2738 | } |
2739 | if (me->me_E.dma_map != NULL) { |
2740 | memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); |
2741 | ubsec_dma_free(sc, &me->me_E); |
2742 | } |
2743 | if (me->me_C.dma_map != NULL) { |
2744 | memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); |
2745 | ubsec_dma_free(sc, &me->me_C); |
2746 | } |
2747 | if (me->me_epb.dma_map != NULL) |
2748 | ubsec_dma_free(sc, &me->me_epb); |
2749 | free(me, M_DEVBUF); |
2750 | } |
2751 | krp->krp_status = err; |
2752 | crypto_kdone(krp); |
2753 | return (0); |
2754 | } |
2755 | |
2756 | /* |
2757 | * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization) |
2758 | */ |
2759 | static int |
2760 | ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, |
2761 | int hint) |
2762 | { |
2763 | struct ubsec_q2_modexp *me; |
2764 | struct ubsec_mcr *mcr; |
2765 | struct ubsec_ctx_modexp *ctx; |
2766 | struct ubsec_pktbuf *epb; |
2767 | int err = 0; |
2768 | u_int nbits, normbits, mbits, shiftbits, ebits; |
2769 | |
2770 | me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT); |
2771 | if (me == NULL) { |
2772 | err = ENOMEM; |
2773 | goto errout; |
2774 | } |
2775 | memset(me, 0, sizeof *me); |
2776 | me->me_krp = krp; |
2777 | me->me_q.q_type = UBS_CTXOP_MODEXP; |
2778 | |
2779 | nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]); |
2780 | if (nbits <= 512) |
2781 | normbits = 512; |
2782 | else if (nbits <= 768) |
2783 | normbits = 768; |
2784 | else if (nbits <= 1024) |
2785 | normbits = 1024; |
2786 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536) |
2787 | normbits = 1536; |
2788 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048) |
2789 | normbits = 2048; |
2790 | else { |
2791 | err = E2BIG; |
2792 | goto errout; |
2793 | } |
2794 | |
2795 | shiftbits = normbits - nbits; |
2796 | |
2797 | /* XXX ??? */ |
2798 | me->me_modbits = nbits; |
2799 | me->me_shiftbits = shiftbits; |
2800 | me->me_normbits = normbits; |
2801 | |
2802 | /* Sanity check: result bits must be >= true modulus bits. */ |
2803 | if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) { |
2804 | err = ERANGE; |
2805 | goto errout; |
2806 | } |
2807 | |
2808 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), |
2809 | &me->me_q.q_mcr, 0)) { |
2810 | err = ENOMEM; |
2811 | goto errout; |
2812 | } |
2813 | mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr; |
2814 | |
2815 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp), |
2816 | &me->me_q.q_ctx, 0)) { |
2817 | err = ENOMEM; |
2818 | goto errout; |
2819 | } |
2820 | |
2821 | mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]); |
2822 | if (mbits > nbits) { |
2823 | err = E2BIG; |
2824 | goto errout; |
2825 | } |
2826 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) { |
2827 | err = ENOMEM; |
2828 | goto errout; |
2829 | } |
2830 | memset(me->me_M.dma_vaddr, 0, normbits / 8); |
2831 | bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p, |
2832 | me->me_M.dma_vaddr, (mbits + 7) / 8); |
2833 | |
2834 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) { |
2835 | err = ENOMEM; |
2836 | goto errout; |
2837 | } |
2838 | memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); |
2839 | |
2840 | ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]); |
2841 | if (ebits > nbits) { |
2842 | err = E2BIG; |
2843 | goto errout; |
2844 | } |
2845 | if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) { |
2846 | err = ENOMEM; |
2847 | goto errout; |
2848 | } |
2849 | memset(me->me_E.dma_vaddr, 0, normbits / 8); |
2850 | bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p, |
2851 | me->me_E.dma_vaddr, (ebits + 7) / 8); |
2852 | |
2853 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf), |
2854 | &me->me_epb, 0)) { |
2855 | err = ENOMEM; |
2856 | goto errout; |
2857 | } |
2858 | epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr; |
2859 | epb->pb_addr = htole32(me->me_E.dma_paddr); |
2860 | epb->pb_next = 0; |
2861 | epb->pb_len = htole32((ebits + 7) / 8); |
2862 | |
2863 | #ifdef UBSEC_DEBUG |
2864 | if (ubsec_debug) { |
2865 | printf("Epb " ); |
2866 | ubsec_dump_pb(epb); |
2867 | } |
2868 | #endif |
2869 | |
2870 | mcr->mcr_pkts = htole16(1); |
2871 | mcr->mcr_flags = 0; |
2872 | mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr); |
2873 | mcr->mcr_reserved = 0; |
2874 | mcr->mcr_pktlen = 0; |
2875 | |
2876 | mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr); |
2877 | mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8); |
2878 | mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr); |
2879 | |
2880 | mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr); |
2881 | mcr->mcr_opktbuf.pb_next = 0; |
2882 | mcr->mcr_opktbuf.pb_len = htole32(normbits / 8); |
2883 | |
2884 | #ifdef DIAGNOSTIC |
2885 | /* Misaligned output buffer will hang the chip. */ |
2886 | if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0) |
2887 | panic("%s: modexp invalid addr 0x%x" , device_xname(sc->sc_dev), |
2888 | letoh32(mcr->mcr_opktbuf.pb_addr)); |
2889 | if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0) |
2890 | panic("%s: modexp invalid len 0x%x" , device_xname(sc->sc_dev), |
2891 | letoh32(mcr->mcr_opktbuf.pb_len)); |
2892 | #endif |
2893 | |
2894 | ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr; |
2895 | memset(ctx, 0, sizeof(*ctx)); |
2896 | memcpy(ctx->me_N, krp->krp_param[UBS_MODEXP_PAR_N].crp_p, |
2897 | (nbits + 7) / 8); |
2898 | ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t))); |
2899 | ctx->me_op = htole16(UBS_CTXOP_MODEXP); |
2900 | ctx->me_E_len = htole16(ebits); |
2901 | ctx->me_N_len = htole16(nbits); |
2902 | |
2903 | #ifdef UBSEC_DEBUG |
2904 | if (ubsec_debug) { |
2905 | ubsec_dump_mcr(mcr); |
2906 | ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx); |
2907 | } |
2908 | #endif |
2909 | |
2910 | /* |
2911 | * ubsec_feed2 will sync mcr and ctx, we just need to sync |
2912 | * everything else. |
2913 | */ |
2914 | bus_dmamap_sync(sc->sc_dmat, me->me_M.dma_map, |
2915 | 0, me->me_M.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2916 | bus_dmamap_sync(sc->sc_dmat, me->me_E.dma_map, |
2917 | 0, me->me_E.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2918 | bus_dmamap_sync(sc->sc_dmat, me->me_C.dma_map, |
2919 | 0, me->me_C.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
2920 | bus_dmamap_sync(sc->sc_dmat, me->me_epb.dma_map, |
2921 | 0, me->me_epb.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
2922 | |
2923 | /* Enqueue and we're done... */ |
2924 | mutex_spin_enter(&sc->sc_mtx); |
2925 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next); |
2926 | ubsec_feed2(sc); |
2927 | mutex_spin_exit(&sc->sc_mtx); |
2928 | |
2929 | return (0); |
2930 | |
2931 | errout: |
2932 | if (me != NULL) { |
2933 | if (me->me_q.q_mcr.dma_map != NULL) |
2934 | ubsec_dma_free(sc, &me->me_q.q_mcr); |
2935 | if (me->me_q.q_ctx.dma_map != NULL) { |
2936 | memset(me->me_q.q_ctx.dma_vaddr, 0, |
2937 | me->me_q.q_ctx.dma_size); |
2938 | ubsec_dma_free(sc, &me->me_q.q_ctx); |
2939 | } |
2940 | if (me->me_M.dma_map != NULL) { |
2941 | memset(me->me_M.dma_vaddr, 0, me->me_M.dma_size); |
2942 | ubsec_dma_free(sc, &me->me_M); |
2943 | } |
2944 | if (me->me_E.dma_map != NULL) { |
2945 | memset(me->me_E.dma_vaddr, 0, me->me_E.dma_size); |
2946 | ubsec_dma_free(sc, &me->me_E); |
2947 | } |
2948 | if (me->me_C.dma_map != NULL) { |
2949 | memset(me->me_C.dma_vaddr, 0, me->me_C.dma_size); |
2950 | ubsec_dma_free(sc, &me->me_C); |
2951 | } |
2952 | if (me->me_epb.dma_map != NULL) |
2953 | ubsec_dma_free(sc, &me->me_epb); |
2954 | free(me, M_DEVBUF); |
2955 | } |
2956 | krp->krp_status = err; |
2957 | crypto_kdone(krp); |
2958 | return (0); |
2959 | } |
2960 | |
2961 | static int |
2962 | ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, |
2963 | int hint) |
2964 | { |
2965 | struct ubsec_q2_rsapriv *rp = NULL; |
2966 | struct ubsec_mcr *mcr; |
2967 | struct ubsec_ctx_rsapriv *ctx; |
2968 | int err = 0; |
2969 | u_int padlen, msglen; |
2970 | |
2971 | msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]); |
2972 | padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]); |
2973 | if (msglen > padlen) |
2974 | padlen = msglen; |
2975 | |
2976 | if (padlen <= 256) |
2977 | padlen = 256; |
2978 | else if (padlen <= 384) |
2979 | padlen = 384; |
2980 | else if (padlen <= 512) |
2981 | padlen = 512; |
2982 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768) |
2983 | padlen = 768; |
2984 | else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024) |
2985 | padlen = 1024; |
2986 | else { |
2987 | err = E2BIG; |
2988 | goto errout; |
2989 | } |
2990 | |
2991 | if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) { |
2992 | err = E2BIG; |
2993 | goto errout; |
2994 | } |
2995 | |
2996 | if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) { |
2997 | err = E2BIG; |
2998 | goto errout; |
2999 | } |
3000 | |
3001 | if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) { |
3002 | err = E2BIG; |
3003 | goto errout; |
3004 | } |
3005 | |
3006 | rp = malloc(sizeof *rp, M_DEVBUF, M_NOWAIT|M_ZERO); |
3007 | if (rp == NULL) |
3008 | return (ENOMEM); |
3009 | rp->rpr_krp = krp; |
3010 | rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV; |
3011 | |
3012 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr), |
3013 | &rp->rpr_q.q_mcr, 0)) { |
3014 | err = ENOMEM; |
3015 | goto errout; |
3016 | } |
3017 | mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr; |
3018 | |
3019 | if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv), |
3020 | &rp->rpr_q.q_ctx, 0)) { |
3021 | err = ENOMEM; |
3022 | goto errout; |
3023 | } |
3024 | ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr; |
3025 | memset(ctx, 0, sizeof *ctx); |
3026 | |
3027 | /* Copy in p */ |
3028 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p, |
3029 | &ctx->rpr_buf[0 * (padlen / 8)], |
3030 | (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8); |
3031 | |
3032 | /* Copy in q */ |
3033 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p, |
3034 | &ctx->rpr_buf[1 * (padlen / 8)], |
3035 | (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8); |
3036 | |
3037 | /* Copy in dp */ |
3038 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p, |
3039 | &ctx->rpr_buf[2 * (padlen / 8)], |
3040 | (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8); |
3041 | |
3042 | /* Copy in dq */ |
3043 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p, |
3044 | &ctx->rpr_buf[3 * (padlen / 8)], |
3045 | (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8); |
3046 | |
3047 | /* Copy in pinv */ |
3048 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p, |
3049 | &ctx->rpr_buf[4 * (padlen / 8)], |
3050 | (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8); |
3051 | |
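	/*
	 * The CRT result is as wide as the modulus n = p * q, i.e.
	 * twice the padded prime size.
	 */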
3052 | msglen = padlen * 2; |
3053 | |
3054 | /* Copy in input message (aligned buffer/length). */ |
3055 | if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) { |
3056 | /* Is this likely? */ |
3057 | err = E2BIG; |
3058 | goto errout; |
3059 | } |
3060 | if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) { |
3061 | err = ENOMEM; |
3062 | goto errout; |
3063 | } |
3064 | memset(rp->rpr_msgin.dma_vaddr, 0, (msglen + 7) / 8); |
3065 | bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p, |
3066 | rp->rpr_msgin.dma_vaddr, |
3067 | (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8); |
3068 | |
3069 | /* Prepare space for output message (aligned buffer/length). */ |
3070 | if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) { |
3071 | /* Is this likely? */ |
3072 | err = E2BIG; |
3073 | goto errout; |
3074 | } |
3075 | if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) { |
3076 | err = ENOMEM; |
3077 | goto errout; |
3078 | } |
3079 | memset(rp->rpr_msgout.dma_vaddr, 0, (msglen + 7) / 8); |
3080 | |
3081 | mcr->mcr_pkts = htole16(1); |
3082 | mcr->mcr_flags = 0; |
3083 | mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr); |
3084 | mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr); |
3085 | mcr->mcr_ipktbuf.pb_next = 0; |
3086 | mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size); |
3087 | mcr->mcr_reserved = 0; |
3088 | mcr->mcr_pktlen = htole16(msglen); |
3089 | mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr); |
3090 | mcr->mcr_opktbuf.pb_next = 0; |
3091 | mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size); |
3092 | |
3093 | #ifdef DIAGNOSTIC |
3094 | if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) { |
3095 | panic("%s: rsapriv: invalid msgin 0x%lx(0x%lx)" , |
3096 | device_xname(sc->sc_dev), (u_long) rp->rpr_msgin.dma_paddr, |
3097 | (u_long) rp->rpr_msgin.dma_size); |
3098 | } |
3099 | if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) { |
3100 | panic("%s: rsapriv: invalid msgout 0x%lx(0x%lx)" , |
3101 | device_xname(sc->sc_dev), (u_long) rp->rpr_msgout.dma_paddr, |
3102 | (u_long) rp->rpr_msgout.dma_size); |
3103 | } |
3104 | #endif |
3105 | |
	ctx->rpr_len = htole16((sizeof(u_int16_t) * 4) + (5 * (padlen / 8)));
3107 | ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV); |
3108 | ctx->rpr_q_len = htole16(padlen); |
3109 | ctx->rpr_p_len = htole16(padlen); |
3110 | |
3111 | /* |
3112 | * ubsec_feed2 will sync mcr and ctx, we just need to sync |
3113 | * everything else. |
3114 | */ |
3115 | bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgin.dma_map, |
3116 | 0, rp->rpr_msgin.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); |
3117 | bus_dmamap_sync(sc->sc_dmat, rp->rpr_msgout.dma_map, |
3118 | 0, rp->rpr_msgout.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); |
3119 | |
3120 | /* Enqueue and we're done... */ |
3121 | mutex_spin_enter(&sc->sc_mtx); |
3122 | SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next); |
3123 | ubsec_feed2(sc); |
3124 | ubsecstats.hst_modexpcrt++; |
3125 | mutex_spin_exit(&sc->sc_mtx); |
3126 | return (0); |
3127 | |
3128 | errout: |
3129 | if (rp != NULL) { |
3130 | if (rp->rpr_q.q_mcr.dma_map != NULL) |
3131 | ubsec_dma_free(sc, &rp->rpr_q.q_mcr); |
3132 | if (rp->rpr_msgin.dma_map != NULL) { |
3133 | memset(rp->rpr_msgin.dma_vaddr, 0, |
3134 | rp->rpr_msgin.dma_size); |
3135 | ubsec_dma_free(sc, &rp->rpr_msgin); |
3136 | } |
3137 | if (rp->rpr_msgout.dma_map != NULL) { |
3138 | memset(rp->rpr_msgout.dma_vaddr, 0, |
3139 | rp->rpr_msgout.dma_size); |
3140 | ubsec_dma_free(sc, &rp->rpr_msgout); |
3141 | } |
3142 | free(rp, M_DEVBUF); |
3143 | } |
3144 | krp->krp_status = err; |
3145 | crypto_kdone(krp); |
3146 | return (0); |
3147 | } |
3148 | |
3149 | #ifdef UBSEC_DEBUG |
3150 | static void |
3151 | ubsec_dump_pb(volatile struct ubsec_pktbuf *pb) |
3152 | { |
3153 | printf("addr 0x%x (0x%x) next 0x%x\n" , |
3154 | pb->pb_addr, pb->pb_len, pb->pb_next); |
3155 | } |
3156 | |
3157 | static void |
3158 | ubsec_dump_ctx2(volatile struct ubsec_ctx_keyop *c) |
3159 | { |
3160 | printf("CTX (0x%x):\n" , c->ctx_len); |
3161 | switch (letoh16(c->ctx_op)) { |
3162 | case UBS_CTXOP_RNGBYPASS: |
3163 | case UBS_CTXOP_RNGSHA1: |
3164 | break; |
3165 | case UBS_CTXOP_MODEXP: |
3166 | { |
3167 | struct ubsec_ctx_modexp *cx = (void *)c; |
3168 | int i, len; |
3169 | |
3170 | printf(" Elen %u, Nlen %u\n" , |
3171 | letoh16(cx->me_E_len), letoh16(cx->me_N_len)); |
		len = (letoh16(cx->me_N_len) + 7) / 8;
		for (i = 0; i < len; i++)
			printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
		printf("\n");
3176 | break; |
3177 | } |
3178 | default: |
3179 | printf("unknown context: %x\n" , c->ctx_op); |
3180 | } |
3181 | printf("END CTX\n" ); |
3182 | } |
3183 | |
3184 | static void |
3185 | ubsec_dump_mcr(struct ubsec_mcr *mcr) |
3186 | { |
3187 | volatile struct ubsec_mcr_add *ma; |
3188 | int i; |
3189 | |
3190 | printf("MCR:\n" ); |
3191 | printf(" pkts: %u, flags 0x%x\n" , |
3192 | letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags)); |
3193 | ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp; |
3194 | for (i = 0; i < letoh16(mcr->mcr_pkts); i++) { |
3195 | printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n" , i, |
3196 | letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen), |
3197 | letoh16(ma->mcr_reserved)); |
3198 | printf(" %d: ipkt " , i); |
3199 | ubsec_dump_pb(&ma->mcr_ipktbuf); |
3200 | printf(" %d: opkt " , i); |
3201 | ubsec_dump_pb(&ma->mcr_opktbuf); |
3202 | ma++; |
3203 | } |
3204 | printf("END MCR\n" ); |
3205 | } |
3206 | #endif /* UBSEC_DEBUG */ |
3207 | |
3208 | /* |
3209 | * Return the number of significant bits of a big number. |
3210 | */ |
3211 | static int |
3212 | ubsec_ksigbits(struct crparam *cr) |
3213 | { |
3214 | u_int plen = (cr->crp_nbits + 7) / 8; |
3215 | int i, sig = plen * 8; |
3216 | u_int8_t c, *p = cr->crp_p; |
3217 | |
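	/*
	 * Operands are stored least-significant byte first; scan
	 * from the top byte down and trim leading zero bits.
	 */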
3218 | for (i = plen - 1; i >= 0; i--) { |
3219 | c = p[i]; |
3220 | if (c != 0) { |
3221 | while ((c & 0x80) == 0) { |
3222 | sig--; |
3223 | c <<= 1; |
3224 | } |
3225 | break; |
3226 | } |
3227 | sig -= 8; |
3228 | } |
3229 | return (sig); |
3230 | } |
3231 | |
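/*
 * Shift a bignum (stored least-significant byte first) up by
 * shiftbits while widening it from srcbits to dstbits, i.e.
 * multiply it by 2^shiftbits; used to normalize operands for
 * the chip.
 */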
3232 | static void |
3233 | ubsec_kshift_r(u_int shiftbits, u_int8_t *src, u_int srcbits, |
3234 | u_int8_t *dst, u_int dstbits) |
3235 | { |
3236 | u_int slen, dlen; |
3237 | int i, si, di, n; |
3238 | |
3239 | slen = (srcbits + 7) / 8; |
3240 | dlen = (dstbits + 7) / 8; |
3241 | |
3242 | for (i = 0; i < slen; i++) |
3243 | dst[i] = src[i]; |
3244 | for (i = 0; i < dlen - slen; i++) |
3245 | dst[slen + i] = 0; |
3246 | |
3247 | n = shiftbits / 8; |
3248 | if (n != 0) { |
3249 | si = dlen - n - 1; |
3250 | di = dlen - 1; |
3251 | while (si >= 0) |
3252 | dst[di--] = dst[si--]; |
3253 | while (di >= 0) |
3254 | dst[di--] = 0; |
3255 | } |
3256 | |
3257 | n = shiftbits % 8; |
3258 | if (n != 0) { |
3259 | for (i = dlen - 1; i > 0; i--) |
3260 | dst[i] = (dst[i] << n) | |
3261 | (dst[i - 1] >> (8 - n)); |
3262 | dst[0] = dst[0] << n; |
3263 | } |
3264 | } |
3265 | |
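/*
 * Shift a bignum (stored least-significant byte first) down by
 * shiftbits, undoing the normalization shift applied by
 * ubsec_kshift_r().
 */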
3266 | static void |
3267 | ubsec_kshift_l(u_int shiftbits, u_int8_t *src, u_int srcbits, |
3268 | u_int8_t *dst, u_int dstbits) |
3269 | { |
3270 | int slen, dlen, i, n; |
3271 | |
3272 | slen = (srcbits + 7) / 8; |
3273 | dlen = (dstbits + 7) / 8; |
3274 | |
3275 | n = shiftbits / 8; |
3276 | for (i = 0; i < slen; i++) |
3277 | dst[i] = src[i + n]; |
3278 | for (i = 0; i < dlen - slen; i++) |
3279 | dst[slen + i] = 0; |
3280 | |
3281 | n = shiftbits % 8; |
3282 | if (n != 0) { |
3283 | for (i = 0; i < (dlen - 1); i++) |
3284 | dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n)); |
3285 | dst[dlen - 1] = dst[dlen - 1] >> n; |
3286 | } |
3287 | } |
3288 | |