/* lib/libdmsg/crypto.c */
/*
 * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Alex Hornung <alexh@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "dmsg_local.h"

/*
 * Setup crypto for pthreads
 */
static pthread_mutex_t *crypto_locks;
int crypto_count;
static int dmsg_crypto_gcm_init(dmsg_ioq_t *, char *, int, char *, int, int);
static int dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);
static int dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);
/*
 * NOTE: the order of this table needs to match the DMSG_CRYPTO_ALGO_*_IDX
 *       defines in network.h.
 */
static struct crypto_algo crypto_algos[] = {
        {
                .name = "aes-256-gcm",
                .keylen = DMSG_CRYPTO_GCM_KEY_SIZE,
                .taglen = DMSG_CRYPTO_GCM_TAG_SIZE,
                .init = dmsg_crypto_gcm_init,
                .enc_chunk = dmsg_crypto_gcm_encrypt_chunk,
                .dec_chunk = dmsg_crypto_gcm_decrypt_chunk
        },
        { NULL, 0, 0, NULL, NULL, NULL }
};
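/*
 * The two callbacks below serve the legacy OpenSSL/LibreSSL threading
 * hooks (CRYPTO_set_id_callback() / CRYPTO_set_locking_callback()): the
 * library asks the application for a per-thread identifier and for a
 * lock/unlock primitive indexed up to CRYPTO_num_locks().  They are
 * backed by the array of pthread mutexes allocated in dmsg_crypto_setup().
 */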
static
unsigned long
dmsg_crypto_id_callback(void)
{
        return ((unsigned long)(uintptr_t)pthread_self());
}
static
void
dmsg_crypto_locking_callback(int mode, int type,
                             const char *file __unused, int line __unused)
{
        assert(type >= 0 && type < crypto_count);
        if (mode & CRYPTO_LOCK) {
                pthread_mutex_lock(&crypto_locks[type]);
        } else {
                pthread_mutex_unlock(&crypto_locks[type]);
        }
}
void
dmsg_crypto_setup(void)
{
        crypto_count = CRYPTO_num_locks();
        crypto_locks = calloc(crypto_count, sizeof(crypto_locks[0]));
        CRYPTO_set_id_callback(dmsg_crypto_id_callback);
        CRYPTO_set_locking_callback(dmsg_crypto_locking_callback);
}
static
int
dmsg_crypto_gcm_init(dmsg_ioq_t *ioq, char *key, int klen,
                     char *iv_fixed, int ivlen, int enc)
{
        int i, ok;

        if (klen < DMSG_CRYPTO_GCM_KEY_SIZE ||
            ivlen < DMSG_CRYPTO_GCM_IV_FIXED_SIZE) {
                dm_printf(1, "%s\n", "Not enough key or iv material");
                return -1;
        }

        dm_printf(6, "%s key: ", enc ? "Encryption" : "Decryption");
        for (i = 0; i < DMSG_CRYPTO_GCM_KEY_SIZE; ++i)
                dmx_printf(6, "%02x", (unsigned char)key[i]);
        dmx_printf(6, "%s\n", "");

        dm_printf(6, "%s iv: ", enc ? "Encryption" : "Decryption");
        for (i = 0; i < DMSG_CRYPTO_GCM_IV_FIXED_SIZE; ++i)
                dmx_printf(6, "%02x", (unsigned char)iv_fixed[i]);
        dmx_printf(6, "%s\n", " (fixed part only)");

        EVP_CIPHER_CTX_init(&ioq->ctx);

        if (enc)
                ok = EVP_EncryptInit_ex(&ioq->ctx, EVP_aes_256_gcm(), NULL,
                                        (unsigned char*)key, NULL);
        else
                ok = EVP_DecryptInit_ex(&ioq->ctx, EVP_aes_256_gcm(), NULL,
                                        (unsigned char*)key, NULL);
        if (!ok)
                goto fail;
        /*
         * According to the original Galois/Counter Mode of Operation (GCM)
         * proposal, only IVs that are exactly 96 bits get used without any
         * further processing. Other IV sizes cause the GHASH() operation
         * to be applied to the IV, which is more costly.
         *
         * The NIST SP 800-38D also recommends using a 96 bit IV for the same
         * reasons. We actually follow the deterministic construction
         * recommended in NIST SP 800-38D with a 64 bit invocation field as an
         * integer counter and a random, session-specific fixed field.
         *
         * This means that we can essentially use the same session key and
         * IV fixed field for up to 2^64 invocations of the authenticated
         * encryption or decryption.
         *
         * With a chunk size of 64 bytes, this adds up to 1 zettabyte of
         * traffic.
         */
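        /*
         * (That is, 2^64 invocations of a 64 byte chunk is 2^70 bytes,
         * roughly 1.18 * 10^21 bytes, before the invocation counter wraps
         * and the session must be renegotiated.)
         */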
        ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_SET_IVLEN,
                                 DMSG_CRYPTO_GCM_IV_SIZE, NULL);
        if (!ok)
                goto fail;

        memset(ioq->iv, 0, DMSG_CRYPTO_GCM_IV_SIZE);
        memcpy(ioq->iv, iv_fixed, DMSG_CRYPTO_GCM_IV_FIXED_SIZE);

        /*
         * Strictly speaking, padding is irrelevant with a counter mode
         * encryption.
         *
         * However, setting padding to 0, even if using a counter mode such
         * as GCM, will cause an error in _finish if the pt/ct size is not
         * a multiple of the cipher block size.
         */
        EVP_CIPHER_CTX_set_padding(&ioq->ctx, 0);

        return 0;

fail:
        dm_printf(1, "%s\n", "Error during _gcm_init");
        return -1;
}
static
int
_gcm_iv_increment(char *iv)
{
        /*
         * Deterministic construction according to NIST SP 800-38D, with
         * 64 bit invocation field as integer counter.
         *
         * In other words, our 96 bit IV consists of a 32 bit fixed field
         * unique to the session and a 64 bit integer counter.
         */
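        /*
         * Byte layout, following the sizes described above (assuming
         * DMSG_CRYPTO_GCM_IV_FIXED_SIZE is the 4 byte fixed field):
         *
         *      iv[0..3]   session-specific fixed field (random)
         *      iv[4..11]  64 bit big-endian invocation counter
         */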
        uint64_t *c = (uint64_t *)(&iv[DMSG_CRYPTO_GCM_IV_FIXED_SIZE]);

        /* Increment invocation field integer counter */
        *c = htobe64(be64toh(*c)+1);

        /*
         * Detect wrap-around, which means it is time to renegotiate
         * the session to get a new key and/or fixed field.
         */
        return (*c == 0) ? 0 : 1;
}
static
int
dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
                              int in_size, int *out_size)
{
        int ok;
        int u_len, f_len;

        *out_size = 0;

        /* Re-initialize with new IV (but without redoing the key schedule) */
        ok = EVP_EncryptInit_ex(&ioq->ctx, NULL, NULL, NULL,
                                (unsigned char*)ioq->iv);
        if (!ok)
                goto fail;

        u_len = 0;      /* safety */
        ok = EVP_EncryptUpdate(&ioq->ctx, (unsigned char*)ct, &u_len,
                               (unsigned char*)pt, in_size);
        if (!ok)
                goto fail;

        f_len = 0;      /* safety */
        ok = EVP_EncryptFinal_ex(&ioq->ctx, (unsigned char*)ct + u_len, &f_len);
        if (!ok)
                goto fail;

        /* Retrieve auth tag */
        ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_GET_TAG,
                                 DMSG_CRYPTO_GCM_TAG_SIZE,
                                 ct + u_len + f_len);
        if (!ok)
                goto fail;

        ok = _gcm_iv_increment(ioq->iv);
        if (!ok) {
                ioq->error = DMSG_IOQ_ERROR_IVWRAP;
                goto fail_out;
        }

        *out_size = u_len + f_len + DMSG_CRYPTO_GCM_TAG_SIZE;
        EVP_CIPHER_CTX_reset(&ioq->ctx);

        return 0;

fail:
        ioq->error = DMSG_IOQ_ERROR_ALGO;
fail_out:
        EVP_CIPHER_CTX_reset(&ioq->ctx);
        dm_printf(1, "%s\n", "error during encrypt_chunk");
        return -1;
}
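/*
 * Each encrypted unit produced above and consumed below is laid out as
 *
 *      [ DMSG_CRYPTO_CHUNK_SIZE bytes of ciphertext ][ GCM auth tag ]
 *
 * i.e. *out_size / *consume_size is always the chunk size plus
 * DMSG_CRYPTO_GCM_TAG_SIZE for the full chunks handled by the fifo
 * routines at the bottom of this file.
 */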
static
int
dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
                              int out_size, int *consume_size)
{
        int ok;
        int u_len, f_len;

        *consume_size = 0;

        /* Re-initialize with new IV (but without redoing the key schedule) */
        ok = EVP_DecryptInit_ex(&ioq->ctx, NULL, NULL, NULL,
                                (unsigned char*)ioq->iv);
        if (!ok) {
                ioq->error = DMSG_IOQ_ERROR_ALGO;
                goto fail_out;
        }

        ok = EVP_CIPHER_CTX_ctrl(&ioq->ctx, EVP_CTRL_GCM_SET_TAG,
                                 DMSG_CRYPTO_GCM_TAG_SIZE,
                                 ct + out_size);
        if (!ok) {
                ioq->error = DMSG_IOQ_ERROR_ALGO;
                goto fail_out;
        }

        ok = EVP_DecryptUpdate(&ioq->ctx, (unsigned char*)pt, &u_len,
                               (unsigned char*)ct, out_size);
        if (!ok)
                goto fail;

        ok = EVP_DecryptFinal_ex(&ioq->ctx, (unsigned char*)pt + u_len, &f_len);
        if (!ok)
                goto fail;

        ok = _gcm_iv_increment(ioq->iv);
        if (!ok) {
                ioq->error = DMSG_IOQ_ERROR_IVWRAP;
                goto fail_out;
        }

        *consume_size = u_len + f_len + DMSG_CRYPTO_GCM_TAG_SIZE;
        EVP_CIPHER_CTX_reset(&ioq->ctx);

        return 0;

fail:
        ioq->error = DMSG_IOQ_ERROR_MACFAIL;
fail_out:
        EVP_CIPHER_CTX_reset(&ioq->ctx);
        dm_printf(1, "%s\n",
                  "error during decrypt_chunk "
                  "(likely authentication error)");
        return -1;
}
/*
 * Synchronously negotiate crypto for a new session.  This must occur
 * within 10 seconds or the connection is error'd out.
 *
 * We work off the IP address and/or reverse DNS.  The IP address is
 * checked first, followed by the IP address at various levels of granularity,
 * followed by the full domain name and domain names at various levels of
 * granularity.
 *
 *      /etc/hammer2/remote/<name>.pub  - Contains a public key
 *      /etc/hammer2/remote/<name>.none - Indicates no encryption (empty file)
 *                                        (e.g. localhost.none).
 *
 * We first attempt to locate a public key file based on the peer address or
 * peer FQDN.
 *
 *      <name>.none     - No further negotiation is needed.  We simply return.
 *                        All communication proceeds without encryption.
 *                        No public key handshake occurs in this situation.
 *                        (both ends must match).
 *
 *      <name>.pub      - We have located the public key for the peer.  Both
 *                        sides transmit a block encrypted with their private
 *                        keys and the peer's public key.
 *
 *                        Both sides receive a block and decrypt it.
 *
 *                        Both sides formulate a reply using the decrypted
 *                        block and transmit it.
 *
 *                        communication proceeds with the negotiated session
 *                        key (typically AES-256-CBC).
 *
 * If we fail to locate the appropriate file and no floating.db exists the
 * connection is terminated without further action.
 *
 * If floating.db exists the connection proceeds with a floating negotiation.
 */
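/*
 * A key pair in the PEM formats read below (PEM_read_RSA_PUBKEY() for the
 * .pub files, PEM_read_RSAPrivateKey() for rsa.prv) can, for example, be
 * generated with the openssl command line tool:
 *
 *      openssl genrsa -out rsa.prv 2048
 *      openssl rsa -in rsa.prv -pubout -out rsa.pub
 *
 * The modulus size is not fixed by this file, but it must evenly divide
 * the handshake structure size and match on both ends (see the blksize
 * checks below); 2048 bits is only an illustrative choice.
 */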
typedef union {
        struct sockaddr sa;
        struct sockaddr_in sa_in;
        struct sockaddr_in6 sa_in6;
} sockaddr_any_t;
void
dmsg_crypto_negotiate(dmsg_iocom_t *iocom)
{
        sockaddr_any_t sa;
        socklen_t salen = sizeof(sa);
        char peername[128];
        char realname[128];
        dmsg_handshake_t handtx;
        dmsg_handshake_t handrx;
        char buf1[sizeof(handtx)];
        char buf2[sizeof(handtx)];
        char *ptr;
        char *path;
        struct stat st;
        FILE *fp;
        RSA *keys[3] = { NULL, NULL, NULL };
        size_t i;
        size_t blksize;
        size_t blkmask;
        ssize_t n;
        int fd;
        int error;
        /*
         * Get the peer IP address for the connection as a string.
         */
        if (getpeername(iocom->sock_fd, &sa.sa, &salen) < 0) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "accept: getpeername() failed");
                goto done;
        }
        if (getnameinfo(&sa.sa, salen, peername, sizeof(peername),
                        NULL, 0, NI_NUMERICHOST) < 0) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "accept: cannot decode sockaddr");
                goto done;
        }
        if (DMsgDebugOpt) {
                if (realhostname_sa(realname, sizeof(realname),
                                    &sa.sa, salen) == HOSTNAME_FOUND) {
                        dm_printf(1, "accept from %s (%s)\n",
                                  peername, realname);
                } else {
                        dm_printf(1, "accept from %s\n", peername);
                }
        }
        /*
         * Find the remote host's public key
         *
         * If the link is not to be encrypted (<ip>.none located) we shortcut
         * the handshake entirely.  No buffers are exchanged.
         */
        asprintf(&path, "%s/%s.pub", DMSG_PATH_REMOTE, peername);
        if ((fp = fopen(path, "r")) == NULL) {
                free(path);
                asprintf(&path, "%s/%s.none",
                         DMSG_PATH_REMOTE, peername);
                if (stat(path, &st) < 0) {
                        iocom->ioq_rx.error = DMSG_IOQ_ERROR_NORKEY;
                        atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                        dm_printf(1, "%s\n", "auth failure: unknown host");
                        goto done;
                }
                dm_printf(1, "%s\n", "auth succeeded, unencrypted link");
                goto done;
        }
        if (fp) {
                keys[0] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
                fclose(fp);
                if (keys[0] == NULL) {
                        iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
                        atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                        dm_printf(1, "%s\n", "auth failure: bad key format");
                        goto done;
                }
        }
        /*
         * Get our public and private keys
         */
        free(path);
        asprintf(&path, DMSG_DEFAULT_DIR "/rsa.pub");
        if ((fp = fopen(path, "r")) == NULL) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                goto done;
        }
        keys[1] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
        fclose(fp);
        if (keys[1] == NULL) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "auth failure: bad host key format");
                goto done;
        }

        free(path);
        asprintf(&path, DMSG_DEFAULT_DIR "/rsa.prv");
        if ((fp = fopen(path, "r")) == NULL) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "auth failure: bad host key format");
                goto done;
        }
        keys[2] = PEM_read_RSAPrivateKey(fp, NULL, NULL, NULL);
        fclose(fp);
        if (keys[2] == NULL) {
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "auth failure: bad host key format");
                goto done;
        }
        free(path);
        path = NULL;
        /*
         * public key encrypt/decrypt block size.
         */
        if (keys[0]) {
                blksize = (size_t)RSA_size(keys[0]);
                if (blksize != (size_t)RSA_size(keys[1]) ||
                    blksize != (size_t)RSA_size(keys[2]) ||
                    sizeof(handtx) % blksize != 0) {
                        iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
                        atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                        dm_printf(1, "%s\n",
                                  "auth failure: key size mismatch");
                        goto done;
                }
        } else {
                blksize = sizeof(handtx);
        }
        blkmask = blksize - 1;

        bzero(&handrx, sizeof(handrx));
        bzero(&handtx, sizeof(handtx));
        /*
         * Fill all unused fields (particularly all junk fields) with random
         * data, and also set the session key.
         */
        fd = open("/dev/urandom", O_RDONLY);
        if (fd < 0 ||
            fstat(fd, &st) < 0 ||               /* something wrong */
            S_ISREG(st.st_mode) ||              /* supposed to be a RNG dev! */
            read(fd, &handtx, sizeof(handtx)) != sizeof(handtx)) {
urandfail:
                if (fd >= 0)
                        close(fd);
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_BADURANDOM;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dm_printf(1, "%s\n", "auth failure: bad rng");
                goto done;
        }
        if (bcmp(&handrx, &handtx, sizeof(handtx)) == 0)
                goto urandfail;                 /* read all zeros */
        close(fd);
        /* ERR_load_crypto_strings(); openssl debugging */
        /*
         * Handshake with the remote.
         *
         *      Encrypt with my private and remote's public
         *      Decrypt with my private and remote's public
         *
         * When encrypting we have to make sure our buffer fits within the
         * modulus, which typically requires bit 7 of the first byte to be
         * zero.  To be safe make sure that bit 7 and bit 6 are zero.
         */
        snprintf(handtx.quickmsg, sizeof(handtx.quickmsg), "Testing 1 2 3");
        handtx.magic = DMSG_HDR_MAGIC;
        handtx.version = 1;
        handtx.flags = 0;
        assert(sizeof(handtx.verf) * 4 == sizeof(handtx.sess));
        bzero(handtx.verf, sizeof(handtx.verf));

        handtx.pad1[0] &= 0x3f; /* message must fit within modulus */
        handtx.pad2[0] &= 0x3f; /* message must fit within modulus */
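        /*
         * Fold the session key into the (zeroed) verifier field.  The peer
         * performs the same XOR fold over the sess[] bytes it receives, so
         * a correctly decrypted handshake leaves every verf[] byte zero;
         * that is what the constant-time validation further below checks.
         */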
        for (i = 0; i < sizeof(handtx.sess); ++i)
                handtx.verf[i / 4] ^= handtx.sess[i];
        /*
         * Write handshake buffer to remote
         */
        for (i = 0; i < sizeof(handtx); i += blksize) {
                ptr = (char *)&handtx + i;
                if (keys[0]) {
                        /*
                         * Since we are double-encrypting we have to make
                         * sure that the result of the first stage does
                         * not blow out the modulus for the second stage.
                         *
                         * The pointer is pointing to the pad*[] area so
                         * we can mess with that until the first stage
                         * is legal.
                         */
                        do {
                                ++*(int *)(ptr + 4);
                                if (RSA_private_encrypt(blksize,
                                            (unsigned char*)ptr,
                                            (unsigned char*)buf1,
                                            keys[2], RSA_NO_PADDING) < 0) {
                                        iocom->ioq_rx.error =
                                                DMSG_IOQ_ERROR_KEYXCHGFAIL;
                                }
                        } while (buf1[0] & 0xC0);

                        if (RSA_public_encrypt(blksize,
                                            (unsigned char*)buf1,
                                            (unsigned char*)buf2,
                                            keys[0], RSA_NO_PADDING) < 0) {
                                iocom->ioq_rx.error =
                                        DMSG_IOQ_ERROR_KEYXCHGFAIL;
                        }
                }
                if (write(iocom->sock_fd, buf2, blksize) != (ssize_t)blksize) {
                        dmio_printf(iocom, 1, "%s\n", "WRITE ERROR");
                }
        }
        if (iocom->ioq_rx.error) {
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dmio_printf(iocom, 1, "%s\n",
                            "auth failure: key exchange failure "
                            "during encryption");
                goto done;
        }
        /*
         * Read handshake buffer from remote
         */
        i = 0;
        while (i < sizeof(handrx)) {
                ptr = (char *)&handrx + i;
                n = read(iocom->sock_fd, ptr, blksize - (i & blkmask));
                if (n <= 0)
                        break;
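                /*
                 * Rewind ptr to the start of the current block; once a full
                 * blksize worth of ciphertext has accumulated, the block is
                 * double-decrypted in place below.
                 */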
                ptr -= (i & blkmask);
                i += n;
                if (keys[0] && (i & blkmask) == 0) {
                        if (RSA_private_decrypt(blksize,
                                           (unsigned char*)ptr,
                                           (unsigned char*)buf1,
                                           keys[2], RSA_NO_PADDING) < 0)
                                iocom->ioq_rx.error =
                                        DMSG_IOQ_ERROR_KEYXCHGFAIL;
                        if (RSA_public_decrypt(blksize,
                                           (unsigned char*)buf1,
                                           (unsigned char*)ptr,
                                           keys[0], RSA_NO_PADDING) < 0)
                                iocom->ioq_rx.error =
                                        DMSG_IOQ_ERROR_KEYXCHGFAIL;
                }
        }
        if (iocom->ioq_rx.error) {
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dmio_printf(iocom, 1, "%s\n",
                            "auth failure: key exchange failure "
                            "during decryption");
                goto done;
        }
        /*
         * Validate the received data.  Try to make this a constant-time
         * algorithm.
         */
        if (i != sizeof(handrx)) {
keyxchgfail:
                iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYXCHGFAIL;
                atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
                dmio_printf(iocom, 1, "%s\n",
                            "auth failure: key exchange failure");
                goto done;
        }
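        /*
         * Mirror of the sender-side XOR fold: after folding the received
         * sess[] into the received verf[], every verf[] byte must be zero
         * (and the version must match) or the exchange is rejected.  The
         * result is accumulated into n rather than branching per byte so
         * the check stays roughly constant-time.
         */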
        if (handrx.magic == DMSG_HDR_MAGIC_REV) {
                handrx.version = bswap16(handrx.version);
                handrx.flags = bswap32(handrx.flags);
        }
        for (i = 0; i < sizeof(handrx.sess); ++i)
                handrx.verf[i / 4] ^= handrx.sess[i];
        n = 0;
        for (i = 0; i < sizeof(handrx.verf); ++i)
                n += handrx.verf[i];
        if (handrx.version != 1)
                ++n;
        if (n != 0)
                goto keyxchgfail;
        /*
         * Use separate session keys and session fixed IVs for receive and
         * transmit.
         */
        error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_rx,
            (char*)handrx.sess,
            crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            (char*)handrx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            sizeof(handrx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            0 /* decryption */);
        if (error)
                goto keyxchgfail;

        error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_tx,
            (char*)handtx.sess,
            crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            (char*)handtx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            sizeof(handtx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
            1 /* encryption */);
        if (error)
                goto keyxchgfail;

        atomic_set_int(&iocom->flags, DMSG_IOCOMF_CRYPTED);

        dmio_printf(iocom, 1, "auth success: %s\n", handrx.quickmsg);
done:
        if (path)
                free(path);
        if (keys[0])
                RSA_free(keys[0]);
        if (keys[1])
                RSA_free(keys[1]);
        if (keys[2])
                RSA_free(keys[2]);
}
/*
 * Decrypt pending data in the ioq's fifo.  The data is decrypted in-place.
 */
void
dmsg_crypto_decrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
{
        int p_len;
        int used;
        __unused int error;     /* XXX */
        char buf[512];

        /*
         * fifo_beg to fifo_cdx is data already decrypted.
         * fifo_cdn to fifo_end is data not yet decrypted.
         */
        p_len = ioq->fifo_end - ioq->fifo_cdn; /* data not yet decrypted */

        if (p_len == 0)
                return;
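        /*
         * Consume one full encrypted unit (DMSG_CRYPTO_CHUNK_SIZE bytes of
         * ciphertext plus the auth tag) per iteration.  The unit is staged
         * through the local bounce buffer so the plaintext can be written
         * back into the fifo in place at fifo_cdx.
         */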
        while (p_len >= crypto_algos[DMSG_CRYPTO_ALGO].taglen +
               DMSG_CRYPTO_CHUNK_SIZE) {
                bcopy(ioq->buf + ioq->fifo_cdn, buf,
                      crypto_algos[DMSG_CRYPTO_ALGO].taglen +
                      DMSG_CRYPTO_CHUNK_SIZE);
                error = crypto_algos[DMSG_CRYPTO_ALGO].dec_chunk(
                                ioq, buf,
                                ioq->buf + ioq->fifo_cdx,
                                DMSG_CRYPTO_CHUNK_SIZE,
                                &used);
#ifdef CRYPTO_DEBUG
                dmio_printf(iocom, 5,
                            "dec: p_len: %d, used: %d, "
                            "fifo_cdn: %ju, fifo_cdx: %ju\n",
                            p_len, used,
                            ioq->fifo_cdn, ioq->fifo_cdx);
#endif
                p_len -= used;
                ioq->fifo_cdn += used;
                ioq->fifo_cdx += DMSG_CRYPTO_CHUNK_SIZE;
#ifdef CRYPTO_DEBUG
                dmio_printf(iocom, 5,
                            "dec: p_len: %d, used: %d, "
                            "fifo_cdn: %ju, fifo_cdx: %ju\n",
                            p_len, used, ioq->fifo_cdn, ioq->fifo_cdx);
#endif
        }
}
/*
 * *nactp is set to the number of ORIGINAL bytes consumed by the encrypter.
 * The FIFO may contain more data.
 */
int
dmsg_crypto_encrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq,
                    struct iovec *iov, int n, size_t *nactp)
{
        int p_len, used, ct_used;
        int i;
        __unused int error;     /* XXX */
        size_t nmax;

        nmax = sizeof(ioq->buf) - ioq->fifo_end;        /* max new bytes */

        *nactp = 0;
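        /*
         * Walk the source iovecs, encrypting one aligned
         * DMSG_CRYPTO_CHUNK_SIZE plaintext chunk at a time and appending
         * ciphertext plus auth tag to the fifo, until we run out of
         * plaintext or fifo space (nmax).
         */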
        for (i = 0; i < n && nmax; ++i) {
                used = 0;
                p_len = iov[i].iov_len;
                assert((p_len & DMSG_ALIGNMASK) == 0);

                while (p_len >= DMSG_CRYPTO_CHUNK_SIZE &&
                       nmax >= DMSG_CRYPTO_CHUNK_SIZE +
                               (size_t)crypto_algos[DMSG_CRYPTO_ALGO].taglen) {
                        error = crypto_algos[DMSG_CRYPTO_ALGO].enc_chunk(
                                        ioq,
                                        ioq->buf + ioq->fifo_cdx,
                                        (char *)iov[i].iov_base + used,
                                        DMSG_CRYPTO_CHUNK_SIZE, &ct_used);
#ifdef CRYPTO_DEBUG
                        dmio_printf(iocom, 5,
                                    "nactp: %ju, p_len: %d, "
                                    "ct_used: %d, used: %d, nmax: %ju\n",
                                    *nactp, p_len, ct_used, used, nmax);
#endif

                        *nactp += (size_t)DMSG_CRYPTO_CHUNK_SIZE; /* plaintext count */
                        used += DMSG_CRYPTO_CHUNK_SIZE;
                        p_len -= DMSG_CRYPTO_CHUNK_SIZE;

                        /*
                         * NOTE: crypted count will eventually differ from
                         *       nmax, but for now we have not yet introduced
                         *       random armor.
                         */
                        ioq->fifo_cdx += (size_t)ct_used;
                        ioq->fifo_cdn += (size_t)ct_used;
                        ioq->fifo_end += (size_t)ct_used;
                        nmax -= (size_t)ct_used;
#ifdef CRYPTO_DEBUG
                        dmio_printf(iocom, 5,
                                    "nactp: %ju, p_len: %d, "
                                    "ct_used: %d, used: %d, nmax: %ju\n",
                                    *nactp, p_len, ct_used, used, nmax);
#endif
                }
        }
        iov[0].iov_base = ioq->buf + ioq->fifo_beg;
        iov[0].iov_len = ioq->fifo_cdx - ioq->fifo_beg;

        return (1);
}