/*
 * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Alex Hornung <alexh@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "dmsg_local.h"
/*
 * Setup crypto for pthreads
 */
static pthread_mutex_t *crypto_locks;
int crypto_count;
static int dmsg_crypto_gcm_init(dmsg_ioq_t *, char *, int, char *, int, int);
static int dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);
static int dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);

/*
 * NOTE: the order of this table needs to match the DMSG_CRYPTO_ALGO_*_IDX
 *	 defines in network.h.
 */
static struct crypto_algo crypto_algos[] = {
	{
		.name      = "aes-256-gcm",
		.keylen    = DMSG_CRYPTO_GCM_KEY_SIZE,
		.taglen    = DMSG_CRYPTO_GCM_TAG_SIZE,
		.init      = dmsg_crypto_gcm_init,
		.enc_chunk = dmsg_crypto_gcm_encrypt_chunk,
		.dec_chunk = dmsg_crypto_gcm_decrypt_chunk
	},
	{ NULL, 0, 0, NULL, NULL, NULL }
};
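
/*
 * crypto_algos[DMSG_CRYPTO_ALGO] selects the algorithm used by the chunk
 * encrypt/decrypt paths below; AES-256-GCM is the only entry implemented
 * in this file.  The table ends with an all-NULL sentinel entry.
 */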
static
unsigned long
dmsg_crypto_id_callback(void)
{
	return ((unsigned long)(uintptr_t)pthread_self());
}
static
void
dmsg_crypto_locking_callback(int mode, int type,
			     const char *file __unused, int line __unused)
{
	assert(type >= 0 && type < crypto_count);
	if (mode & CRYPTO_LOCK) {
		pthread_mutex_lock(&crypto_locks[type]);
	} else {
		pthread_mutex_unlock(&crypto_locks[type]);
	}
}
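
/*
 * NOTE: The id/locking callbacks above implement the threading model of
 *	 OpenSSL releases prior to 1.1.0, which require the application to
 *	 supply a lock array sized by CRYPTO_num_locks().  With OpenSSL
 *	 1.1.0 and later the library performs its own locking and these
 *	 registration hooks become no-ops.
 */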
void
dmsg_crypto_setup(void)
{
	crypto_count = CRYPTO_num_locks();
	crypto_locks = calloc(crypto_count, sizeof(crypto_locks[0]));
	CRYPTO_set_id_callback(dmsg_crypto_id_callback);
	CRYPTO_set_locking_callback(dmsg_crypto_locking_callback);
}
static
int
dmsg_crypto_gcm_init(dmsg_ioq_t *ioq, char *key, int klen,
		     char *iv_fixed, int ivlen, int enc)
{
	int i, ok;

	if (klen < DMSG_CRYPTO_GCM_KEY_SIZE ||
	    ivlen < DMSG_CRYPTO_GCM_IV_FIXED_SIZE) {
		dm_printf(1, "%s\n", "Not enough key or iv material");
		return -1;
	}

	dm_printf(6, "%s key: ", enc ? "Encryption" : "Decryption");
	for (i = 0; i < DMSG_CRYPTO_GCM_KEY_SIZE; ++i)
		dmx_printf(6, "%02x", (unsigned char)key[i]);
	dmx_printf(6, "%s\n", "");

	dm_printf(6, "%s iv: ", enc ? "Encryption" : "Decryption");
	for (i = 0; i < DMSG_CRYPTO_GCM_IV_FIXED_SIZE; ++i)
		dmx_printf(6, "%02x", (unsigned char)iv_fixed[i]);
	dmx_printf(6, "%s\n", " (fixed part only)");

	EVP_CIPHER_CTX_init(&ioq->ctx);

	if (enc)
		ok = EVP_EncryptInit_ex(&ioq->ctx, EVP_aes_256_gcm(), NULL,
					key, NULL);
	else
		ok = EVP_DecryptInit_ex(&ioq->ctx, EVP_aes_256_gcm(), NULL,
					key, NULL);
	if (!ok)
		goto fail;

	/*
	 * According to the original Galois/Counter Mode of Operation (GCM)
	 * proposal, only IVs that are exactly 96 bits get used without any
	 * further processing.  Other IV sizes cause the GHASH() operation
	 * to be applied to the IV, which is more costly.
	 *
	 * The NIST SP 800-38D also recommends using a 96 bit IV for the same
	 * reasons.  We actually follow the deterministic construction
	 * recommended in NIST SP 800-38D with a 64 bit invocation field as an
	 * integer counter and a random, session-specific fixed field.
	 *
	 * This means that we can essentially use the same session key and
	 * IV fixed field for up to 2^64 invocations of the authenticated
	 * encryption or decryption.
	 *
	 * With a chunk size of 64 bytes, this adds up to 1 zettabyte of
	 * traffic.
	 */
	ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_SET_IVLEN,
				 DMSG_CRYPTO_GCM_IV_SIZE, NULL);
	if (!ok)
		goto fail;

	memset(ioq->iv, 0, DMSG_CRYPTO_GCM_IV_SIZE);
	memcpy(ioq->iv, iv_fixed, DMSG_CRYPTO_GCM_IV_FIXED_SIZE);

	/*
	 * Strictly speaking, padding is irrelevant with a counter mode
	 * encryption.
	 *
	 * However, setting padding to 0, even if using a counter mode such
	 * as GCM, will cause an error in _finish if the pt/ct size is not
	 * a multiple of the cipher block size.
	 */
	EVP_CIPHER_CTX_set_padding(&ioq->ctx, 0);

	return 0;

fail:
	dm_printf(1, "%s\n", "Error during _gcm_init");
	return -1;
}
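
/*
 * NOTE: The EVP_CIPHER_CTX embedded by value in the ioq and the
 *	 EVP_CIPHER_CTX_init() call above follow the pre-1.1.0 OpenSSL API.
 *	 OpenSSL 1.1.0 made the context opaque; porting this file would mean
 *	 allocating it with EVP_CIPHER_CTX_new() and resetting it with
 *	 EVP_CIPHER_CTX_reset().
 */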
static
int
_gcm_iv_increment(char *iv)
{
	/*
	 * Deterministic construction according to NIST SP 800-38D, with
	 * 64 bit invocation field as integer counter.
	 *
	 * In other words, our 96 bit IV consists of a 32 bit fixed field
	 * unique to the session and a 64 bit integer counter.
	 */
	uint64_t *c = (uint64_t *)(&iv[DMSG_CRYPTO_GCM_IV_FIXED_SIZE]);

	/* Increment invocation field integer counter */
	*c = htobe64(be64toh(*c) + 1);

	/*
	 * Detect wrap-around, which means it is time to renegotiate
	 * the session to get a new key and/or fixed field.
	 */
	return (*c == 0) ? 0 : 1;
}
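
/*
 * IV layout, per the construction described above (4 byte fixed field
 * followed by an 8 byte big-endian counter):
 *
 *	byte:	 0  1  2  3   4  5  6  7  8  9 10 11
 *		 [ fixed   ]  [ big-endian counter  ]
 *
 * Example: a counter of 1 increments to 2 and the function returns 1;
 * a counter of 0xffffffffffffffff wraps to 0 and the function returns 0,
 * causing the callers to flag DMSG_IOQ_ERROR_IVWRAP so the session is
 * renegotiated.
 */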
static
int
dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
			      int in_size, int *out_size)
{
	int ok;
	int u_len, f_len;

	*out_size = 0;

	/* Re-initialize with new IV (but without redoing the key schedule) */
	ok = EVP_EncryptInit_ex(&ioq->ctx, NULL, NULL, NULL, ioq->iv);
	if (!ok)
		goto fail;

	u_len = 0;	/* safety */
	ok = EVP_EncryptUpdate(&ioq->ctx, ct, &u_len, pt, in_size);
	if (!ok)
		goto fail;

	f_len = 0;	/* safety */
	ok = EVP_EncryptFinal(&ioq->ctx, ct + u_len, &f_len);
	if (!ok)
		goto fail;

	/* Retrieve auth tag */
	ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_GET_TAG,
				 DMSG_CRYPTO_GCM_TAG_SIZE,
				 ct + u_len + f_len);
	if (!ok)
		goto fail;

	ok = _gcm_iv_increment(ioq->iv);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_IVWRAP;
		goto fail_out;
	}

	*out_size = u_len + f_len + DMSG_CRYPTO_GCM_TAG_SIZE;

	return 0;

fail:
	ioq->error = DMSG_IOQ_ERROR_ALGO;
fail_out:
	dm_printf(1, "%s\n", "error during encrypt_chunk");
	return -1;
}
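
/*
 * On-the-wire layout produced by encrypt_chunk (padding is disabled and
 * GCM behaves like a stream mode, so the Update call emits in_size bytes
 * and the Final call emits none):
 *
 *	[ in_size bytes of ciphertext ][ DMSG_CRYPTO_GCM_TAG_SIZE tag ]
 *
 * *out_size reflects exactly that total, which is what the fifo code in
 * dmsg_crypto_encrypt()/dmsg_crypto_decrypt() accounts for per chunk.
 */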
static
int
dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
			      int out_size, int *consume_size)
{
	int ok;
	int u_len, f_len;

	*consume_size = 0;

	/* Re-initialize with new IV (but without redoing the key schedule) */
	ok = EVP_DecryptInit_ex(&ioq->ctx, NULL, NULL, NULL, ioq->iv);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_ALGO;
		goto fail_out;
	}

	ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_SET_TAG,
				 DMSG_CRYPTO_GCM_TAG_SIZE,
				 ct + out_size);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_ALGO;
		goto fail_out;
	}

	ok = EVP_DecryptUpdate(&ioq->ctx, pt, &u_len, ct, out_size);
	if (!ok)
		goto fail;

	ok = EVP_DecryptFinal(&ioq->ctx, pt + u_len, &f_len);
	if (!ok)
		goto fail;

	ok = _gcm_iv_increment(ioq->iv);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_IVWRAP;
		goto fail_out;
	}

	*consume_size = u_len + f_len + DMSG_CRYPTO_GCM_TAG_SIZE;

	return 0;

fail:
	ioq->error = DMSG_IOQ_ERROR_MACFAIL;
fail_out:
	dm_printf(1, "%s\n",
		  "error during decrypt_chunk "
		  "(likely authentication error)");
	return -1;
}
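
/*
 * In the decrypt direction the expected tag is loaded with
 * EVP_CTRL_GCM_SET_TAG before the data is processed; EVP_DecryptFinal()
 * then fails if the computed tag does not match, which is why that path
 * reports DMSG_IOQ_ERROR_MACFAIL rather than a generic algorithm error.
 */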
/*
 * Synchronously negotiate crypto for a new session.  This must occur
 * within 10 seconds or the connection is error'd out.
 *
 * We work off the IP address and/or reverse DNS.  The IP address is
 * checked first, followed by the IP address at various levels of
 * granularity, followed by the full domain name and domain names at
 * various levels of granularity.
 *
 *	/etc/hammer2/remote/<name>.pub	- Contains a public key
 *	/etc/hammer2/remote/<name>.none	- Indicates no encryption (empty file)
 *					  (e.g. localhost.none).
 *
 * We first attempt to locate a public key file based on the peer address or
 * peer FQDN.
 *
 *	<name>.none	- No further negotiation is needed.  We simply return.
 *			  All communication proceeds without encryption.
 *			  No public key handshake occurs in this situation.
 *			  (both ends must match).
 *
 *	<name>.pub	- We have located the public key for the peer.  Both
 *			  sides transmit a block encrypted with their private
 *			  keys and the peer's public key.
 *
 *			  Both sides receive a block and decrypt it.
 *
 *			  Both sides formulate a reply using the decrypted
 *			  block and transmit it.
 *
 *			  Communication proceeds with the negotiated session
 *			  key (currently AES-256-GCM).
 *
 * If we fail to locate the appropriate file and no floating.db exists the
 * connection is terminated without further action.
 *
 * If floating.db exists the connection proceeds with a floating negotiation.
 */
typedef union {
	struct sockaddr sa;
	struct sockaddr_in sa_in;
	struct sockaddr_in6 sa_in6;
} sockaddr_any_t;
void
dmsg_crypto_negotiate(dmsg_iocom_t *iocom)
{
	sockaddr_any_t sa;
	socklen_t salen = sizeof(sa);
	char peername[128];
	char realname[128];
	dmsg_handshake_t handtx;
	dmsg_handshake_t handrx;
	char buf1[sizeof(handtx)];
	char buf2[sizeof(handtx)];
	char *ptr;
	char *path;
	struct stat st;
	FILE *fp;
	RSA *keys[3] = { NULL, NULL, NULL };
	size_t i;
	size_t blksize;
	size_t blkmask;
	ssize_t n;
	int fd;
	int error;
	/*
	 * Get the peer IP address for the connection as a string.
	 */
	if (getpeername(iocom->sock_fd, &sa.sa, &salen) < 0) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "accept: getpeername() failed");
		goto done;
	}
	if (getnameinfo(&sa.sa, salen, peername, sizeof(peername),
			NULL, 0, NI_NUMERICHOST) < 0) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "accept: cannot decode sockaddr");
		goto done;
	}
	if (DMsgDebugOpt) {
		if (realhostname_sa(realname, sizeof(realname),
				    &sa.sa, salen) == HOSTNAME_FOUND) {
			dm_printf(1, "accept from %s (%s)\n",
				  peername, realname);
		} else {
			dm_printf(1, "accept from %s\n", peername);
		}
	}

	/*
	 * Find the remote host's public key.
	 *
	 * If the link is not to be encrypted (<ip>.none located) we shortcut
	 * the handshake entirely.  No buffers are exchanged.
	 */
	asprintf(&path, "%s/%s.pub", DMSG_PATH_REMOTE, peername);
	if ((fp = fopen(path, "r")) == NULL) {
		free(path);
		asprintf(&path, "%s/%s.none",
			 DMSG_PATH_REMOTE, peername);
		if (stat(path, &st) < 0) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_NORKEY;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n", "auth failure: unknown host");
			goto done;
		}
		dm_printf(1, "%s\n", "auth succeeded, unencrypted link");
		goto done;
	}
	if (fp) {
		keys[0] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
		fclose(fp);
		if (keys[0] == NULL) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n", "auth failure: bad key format");
			goto done;
		}
	}
	/*
	 * Get our public and private keys
	 */
	free(path);
	asprintf(&path, DMSG_DEFAULT_DIR "/rsa.pub");
	if ((fp = fopen(path, "r")) == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		goto done;
	}
	keys[1] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
	fclose(fp);
	if (keys[1] == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad host key format");
		goto done;
	}

	free(path);
	asprintf(&path, DMSG_DEFAULT_DIR "/rsa.prv");
	if ((fp = fopen(path, "r")) == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: cannot open host private key");
		goto done;
	}
	keys[2] = PEM_read_RSAPrivateKey(fp, NULL, NULL, NULL);
	fclose(fp);
	if (keys[2] == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad host key format");
		goto done;
	}
	free(path);
	path = NULL;
	/*
	 * public key encrypt/decrypt block size.
	 */
	if (keys[0]) {
		blksize = (size_t)RSA_size(keys[0]);
		if (blksize != (size_t)RSA_size(keys[1]) ||
		    blksize != (size_t)RSA_size(keys[2]) ||
		    sizeof(handtx) % blksize != 0) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n",
				  "auth failure: key size mismatch");
			goto done;
		}
	} else {
		blksize = sizeof(handtx);
	}
	blkmask = blksize - 1;

	bzero(&handrx, sizeof(handrx));
	bzero(&handtx, sizeof(handtx));
	/*
	 * Fill all unused fields (in particular all junk fields) with random
	 * data, and also set the session key.
	 */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0 ||
	    fstat(fd, &st) < 0 ||		/* something wrong */
	    S_ISREG(st.st_mode) ||		/* supposed to be a RNG dev! */
	    read(fd, &handtx, sizeof(handtx)) != sizeof(handtx)) {
urandfail:
		if (fd >= 0)
			close(fd);
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_BADURANDOM;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad rng");
		goto done;
	}
	if (bcmp(&handrx, &handtx, sizeof(handtx)) == 0)
		goto urandfail;			/* read all zeros */
	close(fd);
	/* ERR_load_crypto_strings(); openssl debugging */
	/*
	 * Handshake with the remote.
	 *
	 *	Encrypt with my private and remote's public
	 *	Decrypt with my private and remote's public
	 *
	 * When encrypting we have to make sure our buffer fits within the
	 * modulus, which typically requires bit 7 of the first byte to be
	 * zero.  To be safe make sure that both bit 7 and bit 6 are zero.
	 */
	snprintf(handtx.quickmsg, sizeof(handtx.quickmsg), "Testing 1 2 3");
	handtx.magic = DMSG_HDR_MAGIC;
	handtx.version = 1;
	handtx.flags = 0;
	assert(sizeof(handtx.verf) * 4 == sizeof(handtx.sess));
	bzero(handtx.verf, sizeof(handtx.verf));

	handtx.pad1[0] &= 0x3f;	/* message must fit within modulus */
	handtx.pad2[0] &= 0x3f;	/* message must fit within modulus */

	for (i = 0; i < sizeof(handtx.sess); ++i)
		handtx.verf[i / 4] ^= handtx.sess[i];
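
	/*
	 * handtx.sess was filled from /dev/urandom above and doubles as the
	 * session key material.  Each byte of handtx.verf is the XOR-fold of
	 * four consecutive session-key bytes; the receiver repeats the same
	 * fold over the block it decrypts, so a correctly exchanged block
	 * cancels verf back to all zeroes (see the validation step below).
	 */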
	/*
	 * Write handshake buffer to remote
	 */
	for (i = 0; i < sizeof(handtx); i += blksize) {
		ptr = (char *)&handtx + i;
		if (keys[0]) {
			/*
			 * Since we are double-encrypting we have to make
			 * sure that the result of the first stage does
			 * not blow out the modulus for the second stage.
			 *
			 * The pointer is pointing to the pad*[] area so
			 * we can mess with that until the first stage
			 * is legal.
			 */
			do {
				++*(int *)(ptr + 4);
				if (RSA_private_encrypt(blksize, ptr, buf1,
					    keys[2], RSA_NO_PADDING) < 0) {
					iocom->ioq_rx.error =
						DMSG_IOQ_ERROR_KEYXCHGFAIL;
				}
			} while (buf1[0] & 0xC0);

			if (RSA_public_encrypt(blksize, buf1, buf2,
					    keys[0], RSA_NO_PADDING) < 0) {
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
			}
		}
		if (write(iocom->sock_fd, buf2, blksize) != (ssize_t)blksize) {
			dmio_printf(iocom, 1, "%s\n", "WRITE ERROR");
		}
	}
	if (iocom->ioq_rx.error) {
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure "
			    "during encryption");
		goto done;
	}
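
	/*
	 * The handshake block is double-encrypted: a raw RSA private-key
	 * operation with our key (keys[2]) effectively signs it, and the
	 * result is then encrypted to the peer's public key (keys[0]).
	 * The do/while above perturbs a pad word until the intermediate
	 * value is small enough to fit under the peer's modulus.  The read
	 * loop below applies the two operations in reverse: our private key
	 * removes the layer encrypted to us and the peer's public key
	 * strips their signature layer.
	 */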
	/*
	 * Read handshake buffer from remote
	 */
	i = 0;
	while (i < sizeof(handrx)) {
		ptr = (char *)&handrx + i;
		n = read(iocom->sock_fd, ptr, blksize - (i & blkmask));
		if (n <= 0)
			break;
		ptr -= (i & blkmask);
		i += n;
		if (keys[0] && (i & blkmask) == 0) {
			if (RSA_private_decrypt(blksize, ptr, buf1,
					   keys[2], RSA_NO_PADDING) < 0)
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
			if (RSA_public_decrypt(blksize, buf1, ptr,
					   keys[0], RSA_NO_PADDING) < 0)
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
		}
	}
	if (iocom->ioq_rx.error) {
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure "
			    "during decryption");
		goto done;
	}
	/*
	 * Validate the received data.  Try to make this a constant-time
	 * algorithm.
	 */
	if (i != sizeof(handrx)) {
keyxchgfail:
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYXCHGFAIL;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure");
		goto done;
	}

	if (handrx.magic == DMSG_HDR_MAGIC_REV) {
		handrx.version = bswap16(handrx.version);
		handrx.flags = bswap32(handrx.flags);
	}

	for (i = 0; i < sizeof(handrx.sess); ++i)
		handrx.verf[i / 4] ^= handrx.sess[i];
	n = 0;
	for (i = 0; i < sizeof(handrx.verf); ++i)
		n += handrx.verf[i];
	if (handrx.version != 1)
		++n;
	if (n != 0)
		goto keyxchgfail;
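
	/*
	 * The verifier fold and the version check both accumulate into n
	 * rather than branching on the first mismatch, so every byte of the
	 * received block is examined before any failure is reported.  This
	 * is what the "constant-time" note above refers to.
	 */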
	/*
	 * Use separate session keys and session fixed IVs for receive and
	 * transmit.
	 */
	error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_rx, handrx.sess,
			crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			handrx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			sizeof(handrx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			0 /* decryption */);
	if (error)
		goto keyxchgfail;

	error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_tx, handtx.sess,
			crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			handtx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			sizeof(handtx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
			1 /* encryption */);
	if (error)
		goto keyxchgfail;
	atomic_set_int(&iocom->flags, DMSG_IOCOMF_CRYPTED);
	dmio_printf(iocom, 1, "auth success: %s\n", handrx.quickmsg);

done:
	if (path)
		free(path);
	if (keys[0])
		RSA_free(keys[0]);
	if (keys[1])
		RSA_free(keys[1]);
	if (keys[2])
		RSA_free(keys[2]);
}
/*
 * Decrypt pending data in the ioq's fifo.  The data is decrypted in-place.
 */
void
dmsg_crypto_decrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
{
	int p_len;
	int used;
	__unused int error;	/* XXX */
	char buf[512];

	/*
	 * fifo_beg to fifo_cdx is data already decrypted.
	 * fifo_cdn to fifo_end is data not yet decrypted.
	 */
	p_len = ioq->fifo_end - ioq->fifo_cdn;	/* data not yet decrypted */

	if (p_len == 0)
		return;
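
	/*
	 * Each encrypted chunk in the fifo occupies DMSG_CRYPTO_CHUNK_SIZE
	 * plus the algorithm's tag length.  A chunk is copied out to the
	 * bounce buffer and decrypted back into the fifo at fifo_cdx, so
	 * fifo_cdn (ciphertext consumed) advances by chunk+tag bytes while
	 * fifo_cdx (plaintext produced) advances by the bare chunk size.
	 */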
	while (p_len >= crypto_algos[DMSG_CRYPTO_ALGO].taglen +
		       DMSG_CRYPTO_CHUNK_SIZE) {
		bcopy(ioq->buf + ioq->fifo_cdn, buf,
		      crypto_algos[DMSG_CRYPTO_ALGO].taglen +
		      DMSG_CRYPTO_CHUNK_SIZE);
		error = crypto_algos[DMSG_CRYPTO_ALGO].dec_chunk(
				ioq, buf,
				ioq->buf + ioq->fifo_cdx,
				DMSG_CRYPTO_CHUNK_SIZE,
				&used);
#ifdef CRYPTO_DEBUG
		dmio_printf(iocom, 5,
			    "dec: p_len: %d, used: %d, "
			    "fifo_cdn: %ju, fifo_cdx: %ju\n",
			    p_len, used,
			    ioq->fifo_cdn, ioq->fifo_cdx);
#endif
		p_len -= used;
		ioq->fifo_cdn += used;
		ioq->fifo_cdx += DMSG_CRYPTO_CHUNK_SIZE;
#ifdef CRYPTO_DEBUG
		dmio_printf(iocom, 5,
			    "dec: p_len: %d, used: %d, "
			    "fifo_cdn: %ju, fifo_cdx: %ju\n",
			    p_len, used, ioq->fifo_cdn, ioq->fifo_cdx);
#endif
	}
}
/*
 * *nactp is set to the number of ORIGINAL bytes consumed by the encrypter.
 * The FIFO may contain more data.
 */
int
dmsg_crypto_encrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq,
		    struct iovec *iov, int n, size_t *nactp)
{
	int p_len, used, ct_used;
	int i;
	__unused int error;	/* XXX */
	size_t nmax;

	nmax = sizeof(ioq->buf) - ioq->fifo_end;	/* max new bytes */
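
	/*
	 * nmax tracks the free space remaining at the tail of the fifo.
	 * Every plaintext chunk grows by taglen bytes once encrypted, so the
	 * inner loop below stops as soon as a full chunk plus tag no longer
	 * fits.  On return iov[0] is rewritten to describe the encrypted
	 * fifo contents and the function reports a single iovec.
	 */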
	*nactp = 0;
	for (i = 0; i < n && nmax; ++i) {
		used = 0;
		p_len = iov[i].iov_len;
		assert((p_len & DMSG_ALIGNMASK) == 0);

		while (p_len >= DMSG_CRYPTO_CHUNK_SIZE &&
		       nmax >= DMSG_CRYPTO_CHUNK_SIZE +
			       (size_t)crypto_algos[DMSG_CRYPTO_ALGO].taglen) {
			error = crypto_algos[DMSG_CRYPTO_ALGO].enc_chunk(
					ioq,
					ioq->buf + ioq->fifo_cdx,
					(char *)iov[i].iov_base + used,
					DMSG_CRYPTO_CHUNK_SIZE, &ct_used);
#ifdef CRYPTO_DEBUG
			dmio_printf(iocom, 5,
				    "nactp: %ju, p_len: %d, "
				    "ct_used: %d, used: %d, nmax: %ju\n",
				    *nactp, p_len, ct_used, used, nmax);
#endif

			*nactp += (size_t)DMSG_CRYPTO_CHUNK_SIZE; /* plaintext count */
			used += DMSG_CRYPTO_CHUNK_SIZE;
			p_len -= DMSG_CRYPTO_CHUNK_SIZE;

			/*
			 * NOTE: crypted count will eventually differ from
			 *	 nmax, but for now we have not yet introduced
			 *	 random armor.
			 */
			ioq->fifo_cdx += (size_t)ct_used;
			ioq->fifo_cdn += (size_t)ct_used;
			ioq->fifo_end += (size_t)ct_used;
			nmax -= (size_t)ct_used;
#ifdef CRYPTO_DEBUG
			dmio_printf(iocom, 5,
				    "nactp: %ju, p_len: %d, "
				    "ct_used: %d, used: %d, nmax: %ju\n",
				    *nactp, p_len, ct_used, used, nmax);
#endif
		}
	}
	iov[0].iov_base = ioq->buf + ioq->fifo_beg;
	iov[0].iov_len = ioq->fifo_cdx - ioq->fifo_beg;

	return (1);
}