| 1 | /* $NetBSD: crypto.c,v 1.48 2016/07/07 06:55:43 msaitoh Exp $ */ |
| 2 | /* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */ |
| 3 | /* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */ |
| 4 | |
| 5 | /*- |
| 6 | * Copyright (c) 2008 The NetBSD Foundation, Inc. |
| 7 | * All rights reserved. |
| 8 | * |
| 9 | * This code is derived from software contributed to The NetBSD Foundation |
| 10 | * by Coyote Point Systems, Inc. |
| 11 | * |
| 12 | * Redistribution and use in source and binary forms, with or without |
| 13 | * modification, are permitted provided that the following conditions |
| 14 | * are met: |
| 15 | * 1. Redistributions of source code must retain the above copyright |
| 16 | * notice, this list of conditions and the following disclaimer. |
| 17 | * 2. Redistributions in binary form must reproduce the above copyright |
| 18 | * notice, this list of conditions and the following disclaimer in the |
| 19 | * documentation and/or other materials provided with the distribution. |
| 20 | * |
| 21 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
| 22 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
| 23 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| 24 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
| 25 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 31 | * POSSIBILITY OF SUCH DAMAGE. |
| 32 | */ |
| 33 | |
| 34 | /* |
| 35 | * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) |
| 36 | * |
| 37 | * This code was written by Angelos D. Keromytis in Athens, Greece, in |
| 38 | * February 2000. Network Security Technologies Inc. (NSTI) kindly |
| 39 | * supported the development of this code. |
| 40 | * |
| 41 | * Copyright (c) 2000, 2001 Angelos D. Keromytis |
| 42 | * |
| 43 | * Permission to use, copy, and modify this software with or without fee |
| 44 | * is hereby granted, provided that this entire notice is included in |
| 45 | * all source code copies of any software which is or includes a copy or |
| 46 | * modification of this software. |
| 47 | * |
| 48 | * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR |
| 49 | * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY |
| 50 | * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE |
| 51 | * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR |
| 52 | * PURPOSE. |
| 53 | */ |
| 54 | |
| 55 | #include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.48 2016/07/07 06:55:43 msaitoh Exp $");
| 57 | |
| 58 | #include <sys/param.h> |
| 59 | #include <sys/reboot.h> |
| 60 | #include <sys/systm.h> |
| 61 | #include <sys/malloc.h> |
| 62 | #include <sys/proc.h> |
| 63 | #include <sys/pool.h> |
| 64 | #include <sys/kthread.h> |
| 65 | #include <sys/once.h> |
| 66 | #include <sys/sysctl.h> |
| 67 | #include <sys/intr.h> |
| 68 | #include <sys/errno.h> |
| 69 | #include <sys/module.h> |
| 70 | |
| 71 | #if defined(_KERNEL_OPT) |
| 72 | #include "opt_ocf.h" |
| 73 | #endif |
| 74 | |
| 75 | #include <opencrypto/cryptodev.h> |
| 76 | #include <opencrypto/xform.h> /* XXX for M_XDATA */ |
| 77 | |
| 78 | kmutex_t crypto_q_mtx; |
| 79 | kmutex_t crypto_ret_q_mtx; |
| 80 | kcondvar_t cryptoret_cv; |
| 81 | kmutex_t crypto_mtx; |
| 82 | |
/* below are kludges for residual code written to FreeBSD interfaces */
| 84 | #define SWI_CRYPTO 17 |
| 85 | #define register_swi(lvl, fn) \ |
| 86 | softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, (void (*)(void *))fn, NULL) |
| 87 | #define unregister_swi(lvl, fn) softint_disestablish(softintr_cookie) |
| 88 | #define setsoftcrypto(x) softint_schedule(x) |
| 89 | |
| 90 | int crypto_ret_q_check(struct cryptop *); |
| 91 | |
| 92 | /* |
| 93 | * Crypto drivers register themselves by allocating a slot in the |
| 94 | * crypto_drivers table with crypto_get_driverid() and then registering |
| 95 | * each algorithm they support with crypto_register() and crypto_kregister(). |
| 96 | */ |
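/*
 * An illustrative sketch of that protocol (the mydrv_* names and the
 * softc pointer "sc" are hypothetical, not code from this file):
 *
 *	int32_t cid = crypto_get_driverid(0);
 *	if (cid >= 0) {
 *		crypto_register(cid, CRYPTO_DES_CBC, 0, 0,
 *		    mydrv_newsession, mydrv_freesession,
 *		    mydrv_process, sc);
 *	}
 *
 * crypto_kregister() is the analogous call for key (asymmetric)
 * algorithms.
 */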
| 97 | static struct cryptocap *crypto_drivers; |
| 98 | static int crypto_drivers_num; |
| 99 | static void *softintr_cookie; |
| 100 | static int crypto_exit_flag; |
| 101 | |
| 102 | /* |
| 103 | * There are two queues for crypto requests; one for symmetric (e.g. |
| 104 | * cipher) operations and one for asymmetric (e.g. MOD) operations. |
| 105 | * See below for how synchronization is handled. |
| 106 | */ |
| 107 | static TAILQ_HEAD(,cryptop) crp_q = /* request queues */ |
| 108 | TAILQ_HEAD_INITIALIZER(crp_q); |
| 109 | static TAILQ_HEAD(,cryptkop) crp_kq = |
| 110 | TAILQ_HEAD_INITIALIZER(crp_kq); |
| 111 | |
| 112 | /* |
| 113 | * There are two queues for processing completed crypto requests; one |
| 114 | * for the symmetric and one for the asymmetric ops. We only need one |
| 115 | * but have two to avoid type futzing (cryptop vs. cryptkop). See below |
| 116 | * for how synchronization is handled. |
| 117 | */ |
| 118 | static TAILQ_HEAD(crprethead, cryptop) crp_ret_q = /* callback queues */ |
| 119 | TAILQ_HEAD_INITIALIZER(crp_ret_q); |
| 120 | static TAILQ_HEAD(krprethead, cryptkop) crp_ret_kq = |
| 121 | TAILQ_HEAD_INITIALIZER(crp_ret_kq); |
| 122 | |
/*
 * XXX these functions are ghastly hacks for when the submission
 * XXX routines discover a request that was not CBIMM is already
 * XXX done, and must be yanked from the retq (where the _done()
 * XXX routines put it) as cryptoret won't get the chance. The queue
 * XXX is walked backwards as the request is generally the last one
 * XXX queued.
 *
 * call with crypto_ret_q_mtx held, or else.
 */
| 132 | int |
| 133 | crypto_ret_q_remove(struct cryptop *crp) |
| 134 | { |
| 135 | struct cryptop * acrp, *next; |
| 136 | |
| 137 | TAILQ_FOREACH_REVERSE_SAFE(acrp, &crp_ret_q, crprethead, crp_next, next) { |
| 138 | if (acrp == crp) { |
| 139 | TAILQ_REMOVE(&crp_ret_q, crp, crp_next); |
| 140 | crp->crp_flags &= (~CRYPTO_F_ONRETQ); |
| 141 | return 1; |
| 142 | } |
| 143 | } |
| 144 | return 0; |
| 145 | } |
| 146 | |
| 147 | int |
| 148 | crypto_ret_kq_remove(struct cryptkop *krp) |
| 149 | { |
| 150 | struct cryptkop * akrp, *next; |
| 151 | |
| 152 | TAILQ_FOREACH_REVERSE_SAFE(akrp, &crp_ret_kq, krprethead, krp_next, next) { |
| 153 | if (akrp == krp) { |
| 154 | TAILQ_REMOVE(&crp_ret_kq, krp, krp_next); |
| 155 | krp->krp_flags &= (~CRYPTO_F_ONRETQ); |
| 156 | return 1; |
| 157 | } |
| 158 | } |
| 159 | return 0; |
| 160 | } |
| 161 | |
/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
| 166 | struct pool cryptop_pool; |
| 167 | struct pool cryptodesc_pool; |
| 168 | struct pool cryptkop_pool; |
| 169 | |
| 170 | int crypto_usercrypto = 1; /* userland may open /dev/crypto */ |
| 171 | int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ |
/*
 * crypto_devallowsoft is sysctl'able (as "cryptodevallowsoft" below),
 * controlling access to hardware versus software transforms as follows:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                              transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                              requests for non-accelerated transforms
 *                              (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                              are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* grant user requests only for
					   hardware-accelerated transforms */
| 185 | |
SYSCTL_SETUP(sysctl_opencrypto_setup, "sysctl opencrypto subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "usercrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "crypto support"),
		       NULL, 0, &crypto_usercrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "userasymcrypto",
		       SYSCTL_DESCR("Enable/disable user-mode access to "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_userasymcrypto, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "cryptodevallowsoft",
		       SYSCTL_DESCR("Enable/disable use of software "
			   "asymmetric crypto support"),
		       NULL, 0, &crypto_devallowsoft, 0,
		       CTL_KERN, CTL_CREATE, CTL_EOL);
}
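/*
 * Since these nodes are created under CTL_KERN, the knobs should
 * surface to userland as kern.usercrypto, kern.userasymcrypto and
 * kern.cryptodevallowsoft, e.g. (illustrative):
 *
 *	# sysctl -w kern.cryptodevallowsoft=-1
 *
 * to force userlevel requests onto software transforms.
 */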
| 211 | |
MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
| 213 | |
| 214 | /* |
| 215 | * Synchronization: read carefully, this is non-trivial. |
| 216 | * |
| 217 | * Crypto requests are submitted via crypto_dispatch. Typically |
| 218 | * these come in from network protocols at spl0 (output path) or |
| 219 | * spl[,soft]net (input path). |
| 220 | * |
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto. This thread dispatches
 * the requests to crypto drivers (h/w or s/w) who call crypto_done
 * when a request is complete. Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0. This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
| 234 | * |
| 235 | * This scheme is not intended for SMP machines. |
| 236 | */ |
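/*
 * In outline, a request therefore flows through the paths coded below:
 *
 *	crypto_dispatch(crp)
 *	  -> crypto_invoke(crp) -> driver cc_process()	(immediate path)
 *	  -> crp_q -> cryptointr() -> crypto_invoke()	(batched path)
 *	driver completes -> crypto_done(crp)
 *	  -> crp_ret_q -> cryptoret() -> crp->crp_callback(crp)
 */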
| 237 | static void cryptointr(void); /* swi thread to dispatch ops */ |
static	void cryptoret(void);		/* kernel thread for callbacks */
| 239 | static struct lwp *cryptothread; |
| 240 | static int crypto_destroy(bool); |
| 241 | static int crypto_invoke(struct cryptop *crp, int hint); |
| 242 | static int crypto_kinvoke(struct cryptkop *krp, int hint); |
| 243 | |
| 244 | static struct cryptostats cryptostats; |
| 245 | #ifdef CRYPTO_TIMING |
| 246 | static int crypto_timing = 0; |
| 247 | #endif |
| 248 | |
| 249 | #ifdef _MODULE |
| 250 | static struct sysctllog *sysctl_opencrypto_clog; |
| 251 | #endif |
| 252 | |
| 253 | static int |
| 254 | crypto_init0(void) |
| 255 | { |
| 256 | int error; |
| 257 | |
| 258 | mutex_init(&crypto_mtx, MUTEX_DEFAULT, IPL_NONE); |
| 259 | mutex_init(&crypto_q_mtx, MUTEX_DEFAULT, IPL_NET); |
| 260 | mutex_init(&crypto_ret_q_mtx, MUTEX_DEFAULT, IPL_NET); |
	cv_init(&cryptoret_cv, "crypto_w");
	pool_init(&cryptop_pool, sizeof(struct cryptop), 0, 0,
	    0, "cryptop", NULL, IPL_NET);
	pool_init(&cryptodesc_pool, sizeof(struct cryptodesc), 0, 0,
	    0, "cryptodesc", NULL, IPL_NET);
	pool_init(&cryptkop_pool, sizeof(struct cryptkop), 0, 0,
	    0, "cryptkop", NULL, IPL_NET);
| 268 | |
| 269 | crypto_drivers = malloc(CRYPTO_DRIVERS_INITIAL * |
| 270 | sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); |
| 271 | if (crypto_drivers == NULL) { |
| 272 | printf("crypto_init: cannot malloc driver table\n" ); |
| 273 | return ENOMEM; |
| 274 | } |
| 275 | crypto_drivers_num = CRYPTO_DRIVERS_INITIAL; |
| 276 | |
| 277 | softintr_cookie = register_swi(SWI_CRYPTO, cryptointr); |
	error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
	    (void (*)(void *))cryptoret, NULL, &cryptothread, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; "
		    "error %d\n", error);
| 283 | return crypto_destroy(false); |
| 284 | } |
| 285 | |
| 286 | #ifdef _MODULE |
| 287 | sysctl_opencrypto_setup(&sysctl_opencrypto_clog); |
| 288 | #endif |
| 289 | return 0; |
| 290 | } |
| 291 | |
| 292 | int |
| 293 | crypto_init(void) |
| 294 | { |
| 295 | static ONCE_DECL(crypto_init_once); |
| 296 | |
| 297 | return RUN_ONCE(&crypto_init_once, crypto_init0); |
| 298 | } |
| 299 | |
| 300 | static int |
| 301 | crypto_destroy(bool exit_kthread) |
| 302 | { |
| 303 | int i; |
| 304 | |
	if (exit_kthread) {
		mutex_spin_enter(&crypto_ret_q_mtx);

		/* if we have any in-progress requests, don't unload */
		if (!TAILQ_EMPTY(&crp_q) || !TAILQ_EMPTY(&crp_kq)) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}

		for (i = 0; i < crypto_drivers_num; i++)
			if (crypto_drivers[i].cc_sessions != 0)
				break;
		if (i < crypto_drivers_num) {
			mutex_spin_exit(&crypto_ret_q_mtx);
			return EBUSY;
		}
| 317 | |
| 318 | /* kick the cryptoret thread and wait for it to exit */ |
| 319 | crypto_exit_flag = 1; |
| 320 | cv_signal(&cryptoret_cv); |
| 321 | |
| 322 | while (crypto_exit_flag != 0) |
| 323 | cv_wait(&cryptoret_cv, &crypto_ret_q_mtx); |
| 324 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 325 | } |
| 326 | |
| 327 | #ifdef _MODULE |
| 328 | if (sysctl_opencrypto_clog != NULL) |
| 329 | sysctl_teardown(&sysctl_opencrypto_clog); |
| 330 | #endif |
| 331 | |
| 332 | unregister_swi(SWI_CRYPTO, cryptointr); |
| 333 | |
| 334 | if (crypto_drivers != NULL) |
| 335 | free(crypto_drivers, M_CRYPTO_DATA); |
| 336 | |
| 337 | pool_destroy(&cryptop_pool); |
| 338 | pool_destroy(&cryptodesc_pool); |
| 339 | pool_destroy(&cryptkop_pool); |
| 340 | |
| 341 | cv_destroy(&cryptoret_cv); |
| 342 | |
| 343 | mutex_destroy(&crypto_ret_q_mtx); |
| 344 | mutex_destroy(&crypto_q_mtx); |
| 345 | mutex_destroy(&crypto_mtx); |
| 346 | |
| 347 | return 0; |
| 348 | } |
| 349 | |
/*
 * Create a new session. Acquires crypto_mtx internally, so the
 * caller must not hold it.
 */
| 353 | int |
| 354 | crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard) |
| 355 | { |
| 356 | struct cryptoini *cr; |
| 357 | u_int32_t hid, lid; |
| 358 | int err = EINVAL; |
| 359 | |
| 360 | mutex_enter(&crypto_mtx); |
| 361 | |
| 362 | if (crypto_drivers == NULL) |
| 363 | goto done; |
| 364 | |
| 365 | /* |
| 366 | * The algorithm we use here is pretty stupid; just use the |
| 367 | * first driver that supports all the algorithms we need. |
| 368 | * |
| 369 | * XXX We need more smarts here (in real life too, but that's |
| 370 | * XXX another story altogether). |
| 371 | */ |
| 372 | |
| 373 | for (hid = 0; hid < crypto_drivers_num; hid++) { |
| 374 | /* |
| 375 | * If it's not initialized or has remaining sessions |
| 376 | * referencing it, skip. |
| 377 | */ |
| 378 | if (crypto_drivers[hid].cc_newsession == NULL || |
| 379 | (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)) |
| 380 | continue; |
| 381 | |
| 382 | /* Hardware required -- ignore software drivers. */ |
| 383 | if (hard > 0 && |
| 384 | (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE)) |
| 385 | continue; |
| 386 | /* Software required -- ignore hardware drivers. */ |
| 387 | if (hard < 0 && |
| 388 | (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) |
| 389 | continue; |
| 390 | |
| 391 | /* See if all the algorithms are supported. */ |
| 392 | for (cr = cri; cr; cr = cr->cri_next) |
| 393 | if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0) { |
| 394 | DPRINTF(("crypto_newsession: alg %d not supported\n" , cr->cri_alg)); |
| 395 | break; |
| 396 | } |
| 397 | |
| 398 | if (cr == NULL) { |
| 399 | /* Ok, all algorithms are supported. */ |
| 400 | |
| 401 | /* |
| 402 | * Can't do everything in one session. |
| 403 | * |
| 404 | * XXX Fix this. We need to inject a "virtual" session layer right |
| 405 | * XXX about here. |
| 406 | */ |
| 407 | |
| 408 | /* Call the driver initialization routine. */ |
| 409 | lid = hid; /* Pass the driver ID. */ |
| 410 | err = crypto_drivers[hid].cc_newsession( |
| 411 | crypto_drivers[hid].cc_arg, &lid, cri); |
| 412 | if (err == 0) { |
| 413 | (*sid) = hid; |
| 414 | (*sid) <<= 32; |
| 415 | (*sid) |= (lid & 0xffffffff); |
| 416 | crypto_drivers[hid].cc_sessions++; |
| 417 | } |
| 418 | goto done; |
| 419 | /*break;*/ |
| 420 | } |
| 421 | } |
| 422 | done: |
| 423 | mutex_exit(&crypto_mtx); |
| 424 | return err; |
| 425 | } |
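/*
 * A minimal calling sketch (assumes only the interface above; "key"
 * and "klen" stand in for the caller's keying material, klen in bits
 * per cryptoini convention):
 *
 *	struct cryptoini cri;
 *	u_int64_t sid;
 *	int error;
 *
 *	memset(&cri, 0, sizeof(cri));
 *	cri.cri_alg = CRYPTO_DES_CBC;
 *	cri.cri_klen = klen;
 *	cri.cri_key = key;
 *	error = crypto_newsession(&sid, &cri, 0);	(0 = h/w or s/w)
 */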
| 426 | |
/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver). Acquires crypto_mtx internally, so the caller must not
 * hold it.
 */
| 431 | int |
| 432 | crypto_freesession(u_int64_t sid) |
| 433 | { |
| 434 | u_int32_t hid; |
| 435 | int err = 0; |
| 436 | |
| 437 | mutex_enter(&crypto_mtx); |
| 438 | |
| 439 | if (crypto_drivers == NULL) { |
| 440 | err = EINVAL; |
| 441 | goto done; |
| 442 | } |
| 443 | |
	/* Determine the driver ID. */
| 445 | hid = CRYPTO_SESID2HID(sid); |
| 446 | |
| 447 | if (hid >= crypto_drivers_num) { |
| 448 | err = ENOENT; |
| 449 | goto done; |
| 450 | } |
| 451 | |
| 452 | if (crypto_drivers[hid].cc_sessions) |
| 453 | crypto_drivers[hid].cc_sessions--; |
| 454 | |
| 455 | /* Call the driver cleanup routine, if available. */ |
| 456 | if (crypto_drivers[hid].cc_freesession) { |
| 457 | err = crypto_drivers[hid].cc_freesession( |
| 458 | crypto_drivers[hid].cc_arg, sid); |
| 459 | } |
| 460 | else |
| 461 | err = 0; |
| 462 | |
| 463 | /* |
| 464 | * If this was the last session of a driver marked as invalid, |
| 465 | * make the entry available for reuse. |
| 466 | */ |
| 467 | if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) && |
| 468 | crypto_drivers[hid].cc_sessions == 0) |
| 469 | memset(&crypto_drivers[hid], 0, sizeof(struct cryptocap)); |
| 470 | |
| 471 | done: |
| 472 | mutex_exit(&crypto_mtx); |
| 473 | return err; |
| 474 | } |
| 475 | |
| 476 | /* |
| 477 | * Return an unused driver id. Used by drivers prior to registering |
| 478 | * support for the algorithms they handle. |
| 479 | */ |
| 480 | int32_t |
| 481 | crypto_get_driverid(u_int32_t flags) |
| 482 | { |
| 483 | struct cryptocap *newdrv; |
| 484 | int i; |
| 485 | |
| 486 | (void)crypto_init(); /* XXX oh, this is foul! */ |
| 487 | |
| 488 | mutex_enter(&crypto_mtx); |
| 489 | for (i = 0; i < crypto_drivers_num; i++) |
| 490 | if (crypto_drivers[i].cc_process == NULL && |
| 491 | (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 && |
| 492 | crypto_drivers[i].cc_sessions == 0) |
| 493 | break; |
| 494 | |
| 495 | /* Out of entries, allocate some more. */ |
| 496 | if (i == crypto_drivers_num) { |
| 497 | /* Be careful about wrap-around. */ |
| 498 | if (2 * crypto_drivers_num <= crypto_drivers_num) { |
| 499 | mutex_exit(&crypto_mtx); |
| 500 | printf("crypto: driver count wraparound!\n" ); |
| 501 | return -1; |
| 502 | } |
| 503 | |
| 504 | newdrv = malloc(2 * crypto_drivers_num * |
| 505 | sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); |
| 506 | if (newdrv == NULL) { |
| 507 | mutex_exit(&crypto_mtx); |
| 508 | printf("crypto: no space to expand driver table!\n" ); |
| 509 | return -1; |
| 510 | } |
| 511 | |
| 512 | memcpy(newdrv, crypto_drivers, |
| 513 | crypto_drivers_num * sizeof(struct cryptocap)); |
| 514 | |
| 515 | crypto_drivers_num *= 2; |
| 516 | |
| 517 | free(crypto_drivers, M_CRYPTO_DATA); |
| 518 | crypto_drivers = newdrv; |
| 519 | } |
| 520 | |
| 521 | /* NB: state is zero'd on free */ |
| 522 | crypto_drivers[i].cc_sessions = 1; /* Mark */ |
| 523 | crypto_drivers[i].cc_flags = flags; |
| 524 | |
| 525 | if (bootverbose) |
| 526 | printf("crypto: assign driver %u, flags %u\n" , i, flags); |
| 527 | |
| 528 | mutex_exit(&crypto_mtx); |
| 529 | |
| 530 | return i; |
| 531 | } |
| 532 | |
| 533 | static struct cryptocap * |
| 534 | crypto_checkdriver(u_int32_t hid) |
| 535 | { |
| 536 | if (crypto_drivers == NULL) |
| 537 | return NULL; |
| 538 | return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]); |
| 539 | } |
| 540 | |
/*
 * Register support for a key-related algorithm. This routine
 * is called once for each algorithm supported by a driver.
 */
| 545 | int |
| 546 | crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags, |
| 547 | int (*kprocess)(void *, struct cryptkop *, int), |
| 548 | void *karg) |
| 549 | { |
| 550 | struct cryptocap *cap; |
| 551 | int err; |
| 552 | |
| 553 | mutex_enter(&crypto_mtx); |
| 554 | |
| 555 | cap = crypto_checkdriver(driverid); |
| 556 | if (cap != NULL && |
| 557 | (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { |
| 558 | /* |
| 559 | * XXX Do some performance testing to determine placing. |
| 560 | * XXX We probably need an auxiliary data structure that |
| 561 | * XXX describes relative performances. |
| 562 | */ |
| 563 | |
| 564 | cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; |
| 565 | if (bootverbose) { |
| 566 | printf("crypto: driver %u registers key alg %u " |
| 567 | " flags %u\n" , |
| 568 | driverid, |
| 569 | kalg, |
| 570 | flags |
| 571 | ); |
| 572 | } |
| 573 | |
| 574 | if (cap->cc_kprocess == NULL) { |
| 575 | cap->cc_karg = karg; |
| 576 | cap->cc_kprocess = kprocess; |
| 577 | } |
| 578 | err = 0; |
| 579 | } else |
| 580 | err = EINVAL; |
| 581 | |
| 582 | mutex_exit(&crypto_mtx); |
| 583 | return err; |
| 584 | } |
| 585 | |
| 586 | /* |
| 587 | * Register support for a non-key-related algorithm. This routine |
| 588 | * is called once for each such algorithm supported by a driver. |
| 589 | */ |
| 590 | int |
| 591 | crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, |
| 592 | u_int32_t flags, |
| 593 | int (*newses)(void *, u_int32_t*, struct cryptoini*), |
| 594 | int (*freeses)(void *, u_int64_t), |
| 595 | int (*process)(void *, struct cryptop *, int), |
| 596 | void *arg) |
| 597 | { |
| 598 | struct cryptocap *cap; |
| 599 | int err; |
| 600 | |
| 601 | mutex_enter(&crypto_mtx); |
| 602 | |
| 603 | cap = crypto_checkdriver(driverid); |
| 604 | /* NB: algorithms are in the range [1..max] */ |
| 605 | if (cap != NULL && |
| 606 | (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) { |
| 607 | /* |
| 608 | * XXX Do some performance testing to determine placing. |
| 609 | * XXX We probably need an auxiliary data structure that |
| 610 | * XXX describes relative performances. |
| 611 | */ |
| 612 | |
| 613 | cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; |
| 614 | cap->cc_max_op_len[alg] = maxoplen; |
| 615 | if (bootverbose) { |
| 616 | printf("crypto: driver %u registers alg %u " |
| 617 | "flags %u maxoplen %u\n" , |
| 618 | driverid, |
| 619 | alg, |
| 620 | flags, |
| 621 | maxoplen |
| 622 | ); |
| 623 | } |
| 624 | |
| 625 | if (cap->cc_process == NULL) { |
| 626 | cap->cc_arg = arg; |
| 627 | cap->cc_newsession = newses; |
| 628 | cap->cc_process = process; |
| 629 | cap->cc_freesession = freeses; |
| 630 | cap->cc_sessions = 0; /* Unmark */ |
| 631 | } |
| 632 | err = 0; |
| 633 | } else |
| 634 | err = EINVAL; |
| 635 | |
| 636 | mutex_exit(&crypto_mtx); |
| 637 | return err; |
| 638 | } |
| 639 | |
| 640 | /* |
| 641 | * Unregister a crypto driver. If there are pending sessions using it, |
| 642 | * leave enough information around so that subsequent calls using those |
| 643 | * sessions will correctly detect the driver has been unregistered and |
| 644 | * reroute requests. |
| 645 | */ |
| 646 | int |
| 647 | crypto_unregister(u_int32_t driverid, int alg) |
| 648 | { |
| 649 | int i, err; |
| 650 | u_int32_t ses; |
| 651 | struct cryptocap *cap; |
| 652 | |
| 653 | mutex_enter(&crypto_mtx); |
| 654 | |
| 655 | cap = crypto_checkdriver(driverid); |
| 656 | if (cap != NULL && |
| 657 | (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) && |
| 658 | cap->cc_alg[alg] != 0) { |
| 659 | cap->cc_alg[alg] = 0; |
| 660 | cap->cc_max_op_len[alg] = 0; |
| 661 | |
		/* Was this the last algorithm? */
| 663 | for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) |
| 664 | if (cap->cc_alg[i] != 0) |
| 665 | break; |
| 666 | |
| 667 | if (i == CRYPTO_ALGORITHM_MAX + 1) { |
| 668 | ses = cap->cc_sessions; |
| 669 | memset(cap, 0, sizeof(struct cryptocap)); |
| 670 | if (ses != 0) { |
| 671 | /* |
| 672 | * If there are pending sessions, just mark as invalid. |
| 673 | */ |
| 674 | cap->cc_flags |= CRYPTOCAP_F_CLEANUP; |
| 675 | cap->cc_sessions = ses; |
| 676 | } |
| 677 | } |
| 678 | err = 0; |
| 679 | } else |
| 680 | err = EINVAL; |
| 681 | |
| 682 | mutex_exit(&crypto_mtx); |
| 683 | return err; |
| 684 | } |
| 685 | |
| 686 | /* |
| 687 | * Unregister all algorithms associated with a crypto driver. |
| 688 | * If there are pending sessions using it, leave enough information |
| 689 | * around so that subsequent calls using those sessions will |
| 690 | * correctly detect the driver has been unregistered and reroute |
| 691 | * requests. |
| 692 | * |
| 693 | * XXX careful. Don't change this to call crypto_unregister() for each |
| 694 | * XXX registered algorithm unless you drop the mutex across the calls; |
| 695 | * XXX you can't take it recursively. |
| 696 | */ |
| 697 | int |
| 698 | crypto_unregister_all(u_int32_t driverid) |
| 699 | { |
| 700 | int i, err; |
| 701 | u_int32_t ses; |
| 702 | struct cryptocap *cap; |
| 703 | |
| 704 | mutex_enter(&crypto_mtx); |
| 705 | cap = crypto_checkdriver(driverid); |
| 706 | if (cap != NULL) { |
| 707 | for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) { |
| 708 | cap->cc_alg[i] = 0; |
| 709 | cap->cc_max_op_len[i] = 0; |
| 710 | } |
| 711 | ses = cap->cc_sessions; |
| 712 | memset(cap, 0, sizeof(struct cryptocap)); |
| 713 | if (ses != 0) { |
| 714 | /* |
| 715 | * If there are pending sessions, just mark as invalid. |
| 716 | */ |
| 717 | cap->cc_flags |= CRYPTOCAP_F_CLEANUP; |
| 718 | cap->cc_sessions = ses; |
| 719 | } |
| 720 | err = 0; |
| 721 | } else |
| 722 | err = EINVAL; |
| 723 | |
| 724 | mutex_exit(&crypto_mtx); |
| 725 | return err; |
| 726 | } |
| 727 | |
| 728 | /* |
| 729 | * Clear blockage on a driver. The what parameter indicates whether |
| 730 | * the driver is now ready for cryptop's and/or cryptokop's. |
| 731 | */ |
| 732 | int |
| 733 | crypto_unblock(u_int32_t driverid, int what) |
| 734 | { |
| 735 | struct cryptocap *cap; |
| 736 | int needwakeup, err; |
| 737 | |
| 738 | mutex_spin_enter(&crypto_q_mtx); |
| 739 | cap = crypto_checkdriver(driverid); |
| 740 | if (cap != NULL) { |
| 741 | needwakeup = 0; |
| 742 | if (what & CRYPTO_SYMQ) { |
| 743 | needwakeup |= cap->cc_qblocked; |
| 744 | cap->cc_qblocked = 0; |
| 745 | } |
| 746 | if (what & CRYPTO_ASYMQ) { |
| 747 | needwakeup |= cap->cc_kqblocked; |
| 748 | cap->cc_kqblocked = 0; |
| 749 | } |
| 750 | err = 0; |
| 751 | if (needwakeup) |
| 752 | setsoftcrypto(softintr_cookie); |
| 753 | mutex_spin_exit(&crypto_q_mtx); |
| 754 | } else { |
| 755 | err = EINVAL; |
| 756 | mutex_spin_exit(&crypto_q_mtx); |
| 757 | } |
| 758 | |
| 759 | return err; |
| 760 | } |
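/*
 * The expected pairing (sketch): a driver whose process routine
 * returned ERESTART calls this once resources free up, e.g. from its
 * interrupt handler ("sc_cid" being the id it obtained from
 * crypto_get_driverid()):
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 */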
| 761 | |
| 762 | /* |
| 763 | * Dispatch a crypto request to a driver or queue |
| 764 | * it, to be processed by the kernel thread. |
| 765 | */ |
| 766 | int |
| 767 | crypto_dispatch(struct cryptop *crp) |
| 768 | { |
| 769 | u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid); |
| 770 | int result; |
| 771 | |
| 772 | mutex_spin_enter(&crypto_q_mtx); |
| 773 | DPRINTF(("crypto_dispatch: crp %p, alg %d\n" , |
| 774 | crp, crp->crp_desc->crd_alg)); |
| 775 | |
| 776 | cryptostats.cs_ops++; |
| 777 | |
| 778 | #ifdef CRYPTO_TIMING |
| 779 | if (crypto_timing) |
| 780 | nanouptime(&crp->crp_tstamp); |
| 781 | #endif |
| 782 | if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { |
| 783 | struct cryptocap *cap; |
| 784 | /* |
| 785 | * Caller marked the request to be processed |
| 786 | * immediately; dispatch it directly to the |
| 787 | * driver unless the driver is currently blocked. |
| 788 | */ |
| 789 | cap = crypto_checkdriver(hid); |
| 790 | if (cap && !cap->cc_qblocked) { |
| 791 | mutex_spin_exit(&crypto_q_mtx); |
| 792 | result = crypto_invoke(crp, 0); |
| 793 | if (result == ERESTART) { |
| 794 | /* |
| 795 | * The driver ran out of resources, mark the |
| 796 | * driver ``blocked'' for cryptop's and put |
| 797 | * the op on the queue. |
| 798 | */ |
| 799 | mutex_spin_enter(&crypto_q_mtx); |
| 800 | crypto_drivers[hid].cc_qblocked = 1; |
| 801 | TAILQ_INSERT_HEAD(&crp_q, crp, crp_next); |
| 802 | cryptostats.cs_blocks++; |
| 803 | mutex_spin_exit(&crypto_q_mtx); |
| 804 | } |
| 805 | goto out_released; |
| 806 | } else { |
| 807 | /* |
| 808 | * The driver is blocked, just queue the op until |
| 809 | * it unblocks and the swi thread gets kicked. |
| 810 | */ |
| 811 | TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); |
| 812 | result = 0; |
| 813 | } |
| 814 | } else { |
| 815 | int wasempty = TAILQ_EMPTY(&crp_q); |
| 816 | /* |
| 817 | * Caller marked the request as ``ok to delay''; |
| 818 | * queue it for the swi thread. This is desirable |
| 819 | * when the operation is low priority and/or suitable |
| 820 | * for batching. |
| 821 | */ |
| 822 | TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); |
| 823 | if (wasempty) { |
| 824 | setsoftcrypto(softintr_cookie); |
| 825 | mutex_spin_exit(&crypto_q_mtx); |
| 826 | result = 0; |
| 827 | goto out_released; |
| 828 | } |
| 829 | |
| 830 | result = 0; |
| 831 | } |
| 832 | |
| 833 | mutex_spin_exit(&crypto_q_mtx); |
| 834 | out_released: |
| 835 | return result; |
| 836 | } |
| 837 | |
| 838 | /* |
 * Add an asymmetric crypto request to a queue,
| 840 | * to be processed by the kernel thread. |
| 841 | */ |
| 842 | int |
| 843 | crypto_kdispatch(struct cryptkop *krp) |
| 844 | { |
| 845 | struct cryptocap *cap; |
| 846 | int result; |
| 847 | |
| 848 | mutex_spin_enter(&crypto_q_mtx); |
| 849 | cryptostats.cs_kops++; |
| 850 | |
| 851 | cap = crypto_checkdriver(krp->krp_hid); |
| 852 | if (cap && !cap->cc_kqblocked) { |
| 853 | mutex_spin_exit(&crypto_q_mtx); |
| 854 | result = crypto_kinvoke(krp, 0); |
| 855 | if (result == ERESTART) { |
| 856 | /* |
| 857 | * The driver ran out of resources, mark the |
			 * driver ``blocked'' for cryptkop's and put
| 859 | * the op on the queue. |
| 860 | */ |
| 861 | mutex_spin_enter(&crypto_q_mtx); |
| 862 | crypto_drivers[krp->krp_hid].cc_kqblocked = 1; |
| 863 | TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); |
| 864 | cryptostats.cs_kblocks++; |
| 865 | mutex_spin_exit(&crypto_q_mtx); |
| 866 | } |
| 867 | } else { |
| 868 | /* |
| 869 | * The driver is blocked, just queue the op until |
| 870 | * it unblocks and the swi thread gets kicked. |
| 871 | */ |
| 872 | TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); |
| 873 | result = 0; |
| 874 | mutex_spin_exit(&crypto_q_mtx); |
| 875 | } |
| 876 | |
| 877 | return result; |
| 878 | } |
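/*
 * A hedged sketch of building a key op (krp_iparams/krp_oparams and
 * the parameter layout follow struct cryptkop in cryptodev.h; the
 * mycallback name is hypothetical):
 *
 *	krp = pool_get(&cryptkop_pool, 0);
 *	memset(krp, 0, sizeof(*krp));
 *	krp->krp_op = CRK_MOD_EXP;
 *	krp->krp_iparams = 3;		(base, exponent, modulus)
 *	krp->krp_oparams = 1;		(result)
 *	(fill the krp->krp_param[0..3] crparam entries here)
 *	krp->krp_callback = mycallback;
 *	error = crypto_kdispatch(krp);
 */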
| 879 | |
| 880 | /* |
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
| 882 | */ |
| 883 | static int |
| 884 | crypto_kinvoke(struct cryptkop *krp, int hint) |
| 885 | { |
| 886 | u_int32_t hid; |
| 887 | int error; |
| 888 | |
| 889 | /* Sanity checks. */ |
| 890 | if (krp == NULL) |
| 891 | return EINVAL; |
| 892 | if (krp->krp_callback == NULL) { |
| 893 | cv_destroy(&krp->krp_cv); |
| 894 | pool_put(&cryptkop_pool, krp); |
| 895 | return EINVAL; |
| 896 | } |
| 897 | |
| 898 | mutex_enter(&crypto_mtx); |
| 899 | for (hid = 0; hid < crypto_drivers_num; hid++) { |
| 900 | if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && |
| 901 | crypto_devallowsoft == 0) |
| 902 | continue; |
| 903 | if (crypto_drivers[hid].cc_kprocess == NULL) |
| 904 | continue; |
| 905 | if ((crypto_drivers[hid].cc_kalg[krp->krp_op] & |
| 906 | CRYPTO_ALG_FLAG_SUPPORTED) == 0) |
| 907 | continue; |
| 908 | break; |
| 909 | } |
| 910 | if (hid < crypto_drivers_num) { |
| 911 | int (*process)(void *, struct cryptkop *, int); |
| 912 | void *arg; |
| 913 | |
| 914 | process = crypto_drivers[hid].cc_kprocess; |
| 915 | arg = crypto_drivers[hid].cc_karg; |
| 916 | mutex_exit(&crypto_mtx); |
| 917 | krp->krp_hid = hid; |
| 918 | error = (*process)(arg, krp, hint); |
| 919 | } else { |
| 920 | mutex_exit(&crypto_mtx); |
| 921 | error = ENODEV; |
| 922 | } |
| 923 | |
| 924 | if (error) { |
| 925 | krp->krp_status = error; |
| 926 | crypto_kdone(krp); |
| 927 | } |
| 928 | return 0; |
| 929 | } |
| 930 | |
| 931 | #ifdef CRYPTO_TIMING |
| 932 | static void |
| 933 | crypto_tstat(struct cryptotstat *ts, struct timespec *tv) |
| 934 | { |
| 935 | struct timespec now, t; |
| 936 | |
| 937 | nanouptime(&now); |
| 938 | t.tv_sec = now.tv_sec - tv->tv_sec; |
| 939 | t.tv_nsec = now.tv_nsec - tv->tv_nsec; |
| 940 | if (t.tv_nsec < 0) { |
| 941 | t.tv_sec--; |
| 942 | t.tv_nsec += 1000000000; |
| 943 | } |
| 944 | timespecadd(&ts->acc, &t, &t); |
| 945 | if (timespeccmp(&t, &ts->min, <)) |
| 946 | ts->min = t; |
| 947 | if (timespeccmp(&t, &ts->max, >)) |
| 948 | ts->max = t; |
| 949 | ts->count++; |
| 950 | |
| 951 | *tv = now; |
| 952 | } |
| 953 | #endif |
| 954 | |
| 955 | /* |
| 956 | * Dispatch a crypto request to the appropriate crypto devices. |
| 957 | */ |
| 958 | static int |
| 959 | crypto_invoke(struct cryptop *crp, int hint) |
| 960 | { |
| 961 | u_int32_t hid; |
| 962 | |
| 963 | #ifdef CRYPTO_TIMING |
| 964 | if (crypto_timing) |
| 965 | crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); |
| 966 | #endif |
| 967 | /* Sanity checks. */ |
| 968 | if (crp == NULL) |
| 969 | return EINVAL; |
| 970 | if (crp->crp_callback == NULL) { |
| 971 | return EINVAL; |
| 972 | } |
| 973 | if (crp->crp_desc == NULL) { |
| 974 | crp->crp_etype = EINVAL; |
| 975 | crypto_done(crp); |
| 976 | return 0; |
| 977 | } |
| 978 | |
| 979 | hid = CRYPTO_SESID2HID(crp->crp_sid); |
| 980 | |
| 981 | if (hid < crypto_drivers_num) { |
| 982 | int (*process)(void *, struct cryptop *, int); |
| 983 | void *arg; |
| 984 | |
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) {
			/*
			 * crypto_mtx is not held on any path into this
			 * function, and crypto_freesession() acquires
			 * it itself, so call it directly.
			 */
			crypto_freesession(crp->crp_sid);
		}
| 990 | process = crypto_drivers[hid].cc_process; |
| 991 | arg = crypto_drivers[hid].cc_arg; |
| 992 | |
| 993 | /* |
| 994 | * Invoke the driver to process the request. |
| 995 | */ |
| 996 | DPRINTF(("calling process for %p\n" , crp)); |
| 997 | return (*process)(arg, crp, hint); |
| 998 | } else { |
| 999 | struct cryptodesc *crd; |
| 1000 | u_int64_t nid = 0; |
| 1001 | |
| 1002 | /* |
| 1003 | * Driver has unregistered; migrate the session and return |
| 1004 | * an error to the caller so they'll resubmit the op. |
| 1005 | */ |
| 1006 | for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) |
| 1007 | crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); |
| 1008 | |
| 1009 | if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0) |
| 1010 | crp->crp_sid = nid; |
| 1011 | |
| 1012 | crp->crp_etype = EAGAIN; |
| 1013 | |
| 1014 | crypto_done(crp); |
| 1015 | return 0; |
| 1016 | } |
| 1017 | } |
| 1018 | |
| 1019 | /* |
| 1020 | * Release a set of crypto descriptors. |
| 1021 | */ |
| 1022 | void |
| 1023 | crypto_freereq(struct cryptop *crp) |
| 1024 | { |
| 1025 | struct cryptodesc *crd; |
| 1026 | |
| 1027 | if (crp == NULL) |
| 1028 | return; |
| 1029 | DPRINTF(("crypto_freereq[%u]: crp %p\n" , |
| 1030 | CRYPTO_SESID2LID(crp->crp_sid), crp)); |
| 1031 | |
| 1032 | /* sanity check */ |
| 1033 | if (crp->crp_flags & CRYPTO_F_ONRETQ) { |
| 1034 | panic("crypto_freereq() freeing crp on RETQ\n" ); |
| 1035 | } |
| 1036 | |
| 1037 | while ((crd = crp->crp_desc) != NULL) { |
| 1038 | crp->crp_desc = crd->crd_next; |
| 1039 | pool_put(&cryptodesc_pool, crd); |
| 1040 | } |
| 1041 | pool_put(&cryptop_pool, crp); |
| 1042 | } |
| 1043 | |
| 1044 | /* |
| 1045 | * Acquire a set of crypto descriptors. |
| 1046 | */ |
| 1047 | struct cryptop * |
| 1048 | crypto_getreq(int num) |
| 1049 | { |
| 1050 | struct cryptodesc *crd; |
| 1051 | struct cryptop *crp; |
| 1052 | |
| 1053 | crp = pool_get(&cryptop_pool, 0); |
| 1054 | if (crp == NULL) { |
| 1055 | return NULL; |
| 1056 | } |
| 1057 | memset(crp, 0, sizeof(struct cryptop)); |
| 1058 | |
| 1059 | while (num--) { |
| 1060 | crd = pool_get(&cryptodesc_pool, 0); |
| 1061 | if (crd == NULL) { |
| 1062 | crypto_freereq(crp); |
| 1063 | return NULL; |
| 1064 | } |
| 1065 | |
| 1066 | memset(crd, 0, sizeof(struct cryptodesc)); |
| 1067 | crd->crd_next = crp->crp_desc; |
| 1068 | crp->crp_desc = crd; |
| 1069 | } |
| 1070 | |
| 1071 | return crp; |
| 1072 | } |
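/*
 * Together with crypto_dispatch(), a caller's sketch (crp_ilen,
 * crp_buf etc. per struct cryptop in cryptodev.h; "m", "len", "sid"
 * and mycallback are the caller's):
 *
 *	crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return ENOMEM;
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_buf = (void *)m;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_callback = mycallback;
 *	crp->crp_desc->crd_alg = CRYPTO_DES_CBC;
 *	crp->crp_desc->crd_skip = 0;
 *	crp->crp_desc->crd_len = len;
 *	error = crypto_dispatch(crp);
 */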
| 1073 | |
| 1074 | /* |
| 1075 | * Invoke the callback on behalf of the driver. |
| 1076 | */ |
| 1077 | void |
| 1078 | crypto_done(struct cryptop *crp) |
| 1079 | { |
| 1080 | int wasempty; |
| 1081 | |
| 1082 | if (crp->crp_etype != 0) |
| 1083 | cryptostats.cs_errs++; |
| 1084 | #ifdef CRYPTO_TIMING |
| 1085 | if (crypto_timing) |
| 1086 | crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); |
| 1087 | #endif |
| 1088 | DPRINTF(("crypto_done[%u]: crp %p\n" , |
| 1089 | CRYPTO_SESID2LID(crp->crp_sid), crp)); |
| 1090 | |
| 1091 | /* |
| 1092 | * Normal case; queue the callback for the thread. |
| 1093 | * |
| 1094 | * The return queue is manipulated by the swi thread |
| 1095 | * and, potentially, by crypto device drivers calling |
| 1096 | * back to mark operations completed. Thus we need |
| 1097 | * to mask both while manipulating the return queue. |
| 1098 | */ |
| 1099 | if (crp->crp_flags & CRYPTO_F_CBIMM) { |
| 1100 | /* |
| 1101 | * Do the callback directly. This is ok when the |
| 1102 | * callback routine does very little (e.g. the |
| 1103 | * /dev/crypto callback method just does a wakeup). |
| 1104 | */ |
| 1105 | mutex_spin_enter(&crypto_ret_q_mtx); |
| 1106 | crp->crp_flags |= CRYPTO_F_DONE; |
| 1107 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 1108 | |
| 1109 | #ifdef CRYPTO_TIMING |
| 1110 | if (crypto_timing) { |
| 1111 | /* |
| 1112 | * NB: We must copy the timestamp before |
| 1113 | * doing the callback as the cryptop is |
| 1114 | * likely to be reclaimed. |
| 1115 | */ |
| 1116 | struct timespec t = crp->crp_tstamp; |
| 1117 | crypto_tstat(&cryptostats.cs_cb, &t); |
| 1118 | crp->crp_callback(crp); |
| 1119 | crypto_tstat(&cryptostats.cs_finis, &t); |
| 1120 | } else |
| 1121 | #endif |
| 1122 | crp->crp_callback(crp); |
| 1123 | } else { |
| 1124 | mutex_spin_enter(&crypto_ret_q_mtx); |
| 1125 | crp->crp_flags |= CRYPTO_F_DONE; |
| 1126 | |
		if (crp->crp_flags & CRYPTO_F_USER) {
			/*
			 * The request has completed while running
			 * in the user context, so don't queue it -
			 * the user thread won't sleep when it sees
			 * the CRYPTO_F_DONE flag. This is an
			 * optimization to avoid unnecessary context
			 * switches.
			 */
			DPRINTF(("crypto_done[%u]: crp %p CRYPTO_F_USER\n",
			    CRYPTO_SESID2LID(crp->crp_sid), crp));
| 1138 | } else { |
| 1139 | wasempty = TAILQ_EMPTY(&crp_ret_q); |
| 1140 | DPRINTF(("crypto_done[%u]: queueing %p\n" , |
| 1141 | CRYPTO_SESID2LID(crp->crp_sid), crp)); |
| 1142 | crp->crp_flags |= CRYPTO_F_ONRETQ; |
| 1143 | TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next); |
| 1144 | if (wasempty) { |
| 1145 | DPRINTF(("crypto_done[%u]: waking cryptoret, " |
| 1146 | "crp %p hit empty queue\n." , |
| 1147 | CRYPTO_SESID2LID(crp->crp_sid), crp)); |
| 1148 | cv_signal(&cryptoret_cv); |
| 1149 | } |
| 1150 | } |
| 1151 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 1152 | } |
| 1153 | } |
| 1154 | |
| 1155 | /* |
| 1156 | * Invoke the callback on behalf of the driver. |
| 1157 | */ |
| 1158 | void |
| 1159 | crypto_kdone(struct cryptkop *krp) |
| 1160 | { |
| 1161 | int wasempty; |
| 1162 | |
| 1163 | if (krp->krp_status != 0) |
| 1164 | cryptostats.cs_kerrs++; |
| 1165 | |
| 1166 | krp->krp_flags |= CRYPTO_F_DONE; |
| 1167 | |
| 1168 | /* |
| 1169 | * The return queue is manipulated by the swi thread |
| 1170 | * and, potentially, by crypto device drivers calling |
| 1171 | * back to mark operations completed. Thus we need |
| 1172 | * to mask both while manipulating the return queue. |
| 1173 | */ |
| 1174 | if (krp->krp_flags & CRYPTO_F_CBIMM) { |
| 1175 | krp->krp_callback(krp); |
| 1176 | } else { |
| 1177 | mutex_spin_enter(&crypto_ret_q_mtx); |
| 1178 | wasempty = TAILQ_EMPTY(&crp_ret_kq); |
| 1179 | krp->krp_flags |= CRYPTO_F_ONRETQ; |
| 1180 | TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next); |
| 1181 | if (wasempty) |
| 1182 | cv_signal(&cryptoret_cv); |
| 1183 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 1184 | } |
| 1185 | } |
| 1186 | |
| 1187 | int |
| 1188 | crypto_getfeat(int *featp) |
| 1189 | { |
| 1190 | int hid, kalg, feat = 0; |
| 1191 | |
| 1192 | mutex_enter(&crypto_mtx); |
| 1193 | |
| 1194 | if (crypto_userasymcrypto == 0) |
| 1195 | goto out; |
| 1196 | |
| 1197 | for (hid = 0; hid < crypto_drivers_num; hid++) { |
| 1198 | if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && |
| 1199 | crypto_devallowsoft == 0) { |
| 1200 | continue; |
| 1201 | } |
| 1202 | if (crypto_drivers[hid].cc_kprocess == NULL) |
| 1203 | continue; |
| 1204 | for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) |
| 1205 | if ((crypto_drivers[hid].cc_kalg[kalg] & |
| 1206 | CRYPTO_ALG_FLAG_SUPPORTED) != 0) |
| 1207 | feat |= 1 << kalg; |
| 1208 | } |
| 1209 | out: |
| 1210 | mutex_exit(&crypto_mtx); |
| 1211 | *featp = feat; |
| 1212 | return (0); |
| 1213 | } |
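/*
 * Callers (e.g. the /dev/crypto CIOCASYMFEAT path) test bits of the
 * returned mask; a sketch:
 *
 *	int feat;
 *
 *	if (crypto_getfeat(&feat) == 0 && (feat & (1 << CRK_MOD_EXP)))
 *		(modexp can be offloaded to a registered driver)
 */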
| 1214 | |
| 1215 | /* |
| 1216 | * Software interrupt thread to dispatch crypto requests. |
| 1217 | */ |
| 1218 | static void |
| 1219 | cryptointr(void) |
| 1220 | { |
| 1221 | struct cryptop *crp, *submit, *cnext; |
| 1222 | struct cryptkop *krp, *knext; |
| 1223 | struct cryptocap *cap; |
| 1224 | int result, hint; |
| 1225 | |
| 1226 | cryptostats.cs_intrs++; |
| 1227 | mutex_spin_enter(&crypto_q_mtx); |
| 1228 | do { |
| 1229 | /* |
| 1230 | * Find the first element in the queue that can be |
| 1231 | * processed and look-ahead to see if multiple ops |
| 1232 | * are ready for the same driver. |
| 1233 | */ |
| 1234 | submit = NULL; |
| 1235 | hint = 0; |
| 1236 | TAILQ_FOREACH_SAFE(crp, &crp_q, crp_next, cnext) { |
| 1237 | u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid); |
| 1238 | cap = crypto_checkdriver(hid); |
| 1239 | if (cap == NULL || cap->cc_process == NULL) { |
| 1240 | /* Op needs to be migrated, process it. */ |
| 1241 | if (submit == NULL) |
| 1242 | submit = crp; |
| 1243 | break; |
| 1244 | } |
| 1245 | if (!cap->cc_qblocked) { |
| 1246 | if (submit != NULL) { |
| 1247 | /* |
| 1248 | * We stop on finding another op, |
				 * regardless of whether it's for the same
| 1250 | * driver or not. We could keep |
| 1251 | * searching the queue but it might be |
| 1252 | * better to just use a per-driver |
| 1253 | * queue instead. |
| 1254 | */ |
| 1255 | if (CRYPTO_SESID2HID(submit->crp_sid) |
| 1256 | == hid) |
| 1257 | hint = CRYPTO_HINT_MORE; |
| 1258 | break; |
| 1259 | } else { |
| 1260 | submit = crp; |
| 1261 | if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) |
| 1262 | break; |
					/* keep scanning for more that are q'd */
| 1264 | } |
| 1265 | } |
| 1266 | } |
| 1267 | if (submit != NULL) { |
| 1268 | TAILQ_REMOVE(&crp_q, submit, crp_next); |
| 1269 | mutex_spin_exit(&crypto_q_mtx); |
| 1270 | result = crypto_invoke(submit, hint); |
			/* we must retake the mutex here as the TAILQ
			   op or kinvoke below may need it. sigh. */
| 1273 | mutex_spin_enter(&crypto_q_mtx); |
| 1274 | if (result == ERESTART) { |
| 1275 | /* |
| 1276 | * The driver ran out of resources, mark the |
| 1277 | * driver ``blocked'' for cryptop's and put |
				 * the request back in the queue. It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put it
| 1281 | * at the front. This should be ok; putting |
| 1282 | * it at the end does not work. |
| 1283 | */ |
| 1284 | /* XXX validate sid again? */ |
| 1285 | crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1; |
| 1286 | TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); |
| 1287 | cryptostats.cs_blocks++; |
| 1288 | } |
| 1289 | } |
| 1290 | |
| 1291 | /* As above, but for key ops */ |
| 1292 | TAILQ_FOREACH_SAFE(krp, &crp_kq, krp_next, knext) { |
| 1293 | cap = crypto_checkdriver(krp->krp_hid); |
| 1294 | if (cap == NULL || cap->cc_kprocess == NULL) { |
| 1295 | /* Op needs to be migrated, process it. */ |
| 1296 | break; |
| 1297 | } |
| 1298 | if (!cap->cc_kqblocked) |
| 1299 | break; |
| 1300 | } |
| 1301 | if (krp != NULL) { |
| 1302 | TAILQ_REMOVE(&crp_kq, krp, krp_next); |
| 1303 | mutex_spin_exit(&crypto_q_mtx); |
| 1304 | result = crypto_kinvoke(krp, 0); |
| 1305 | /* the next iteration will want the mutex. :-/ */ |
| 1306 | mutex_spin_enter(&crypto_q_mtx); |
| 1307 | if (result == ERESTART) { |
| 1308 | /* |
| 1309 | * The driver ran out of resources, mark the |
| 1310 | * driver ``blocked'' for cryptkop's and put |
				 * the request back in the queue. It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put it
| 1314 | * at the front. This should be ok; putting |
| 1315 | * it at the end does not work. |
| 1316 | */ |
| 1317 | /* XXX validate sid again? */ |
| 1318 | crypto_drivers[krp->krp_hid].cc_kqblocked = 1; |
| 1319 | TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); |
| 1320 | cryptostats.cs_kblocks++; |
| 1321 | } |
| 1322 | } |
| 1323 | } while (submit != NULL || krp != NULL); |
| 1324 | mutex_spin_exit(&crypto_q_mtx); |
| 1325 | } |
| 1326 | |
| 1327 | /* |
| 1328 | * Kernel thread to do callbacks. |
| 1329 | */ |
| 1330 | static void |
| 1331 | cryptoret(void) |
| 1332 | { |
| 1333 | struct cryptop *crp; |
| 1334 | struct cryptkop *krp; |
| 1335 | |
| 1336 | mutex_spin_enter(&crypto_ret_q_mtx); |
| 1337 | for (;;) { |
| 1338 | crp = TAILQ_FIRST(&crp_ret_q); |
| 1339 | if (crp != NULL) { |
| 1340 | TAILQ_REMOVE(&crp_ret_q, crp, crp_next); |
| 1341 | crp->crp_flags &= ~CRYPTO_F_ONRETQ; |
| 1342 | } |
| 1343 | krp = TAILQ_FIRST(&crp_ret_kq); |
| 1344 | if (krp != NULL) { |
| 1345 | TAILQ_REMOVE(&crp_ret_kq, krp, krp_next); |
| 1346 | krp->krp_flags &= ~CRYPTO_F_ONRETQ; |
| 1347 | } |
| 1348 | |
| 1349 | /* drop before calling any callbacks. */ |
| 1350 | if (crp == NULL && krp == NULL) { |
| 1351 | |
| 1352 | /* Check for the exit condition. */ |
| 1353 | if (crypto_exit_flag != 0) { |
| 1354 | |
| 1355 | /* Time to die. */ |
| 1356 | crypto_exit_flag = 0; |
| 1357 | cv_broadcast(&cryptoret_cv); |
| 1358 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 1359 | kthread_exit(0); |
| 1360 | } |
| 1361 | |
| 1362 | cryptostats.cs_rets++; |
| 1363 | cv_wait(&cryptoret_cv, &crypto_ret_q_mtx); |
| 1364 | continue; |
| 1365 | } |
| 1366 | |
| 1367 | mutex_spin_exit(&crypto_ret_q_mtx); |
| 1368 | |
| 1369 | if (crp != NULL) { |
| 1370 | #ifdef CRYPTO_TIMING |
| 1371 | if (crypto_timing) { |
| 1372 | /* |
| 1373 | * NB: We must copy the timestamp before |
| 1374 | * doing the callback as the cryptop is |
| 1375 | * likely to be reclaimed. |
| 1376 | */ |
| 1377 | struct timespec t = crp->crp_tstamp; |
| 1378 | crypto_tstat(&cryptostats.cs_cb, &t); |
| 1379 | crp->crp_callback(crp); |
| 1380 | crypto_tstat(&cryptostats.cs_finis, &t); |
| 1381 | } else |
| 1382 | #endif |
| 1383 | { |
| 1384 | crp->crp_callback(crp); |
| 1385 | } |
| 1386 | } |
| 1387 | if (krp != NULL) |
| 1388 | krp->krp_callback(krp); |
| 1389 | |
| 1390 | mutex_spin_enter(&crypto_ret_q_mtx); |
| 1391 | } |
| 1392 | } |
| 1393 | |
| 1394 | /* NetBSD module interface */ |
| 1395 | |
| 1396 | MODULE(MODULE_CLASS_MISC, opencrypto, NULL); |
| 1397 | |
| 1398 | static int |
| 1399 | opencrypto_modcmd(modcmd_t cmd, void *opaque) |
| 1400 | { |
| 1401 | int error = 0; |
| 1402 | |
| 1403 | switch (cmd) { |
| 1404 | case MODULE_CMD_INIT: |
| 1405 | #ifdef _MODULE |
| 1406 | error = crypto_init(); |
| 1407 | #endif |
| 1408 | break; |
| 1409 | case MODULE_CMD_FINI: |
| 1410 | #ifdef _MODULE |
| 1411 | error = crypto_destroy(true); |
| 1412 | #endif |
| 1413 | break; |
| 1414 | default: |
| 1415 | error = ENOTTY; |
| 1416 | } |
| 1417 | return error; |
| 1418 | } |
| 1419 | |