2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
7 * by Alex Hornung <alexh@dragonflybsd.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 #include "dmsg_local.h"
40 * Setup crypto for pthreads
42 static pthread_mutex_t
*crypto_locks
;
45 static int dmsg_crypto_gcm_init(dmsg_ioq_t
*, char *, int, char *, int, int);
46 static int dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t
*, char *, char *, int, int *);
47 static int dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t
*, char *, char *, int, int *);
50 * NOTE: the order of this table needs to match the DMSG_CRYPTO_ALGO_*_IDX
51 * defines in network.h.
53 static struct crypto_algo crypto_algos
[] = {
55 .name
= "aes-256-gcm",
56 .keylen
= DMSG_CRYPTO_GCM_KEY_SIZE
,
57 .taglen
= DMSG_CRYPTO_GCM_TAG_SIZE
,
58 .init
= dmsg_crypto_gcm_init
,
59 .enc_chunk
= dmsg_crypto_gcm_encrypt_chunk
,
60 .dec_chunk
= dmsg_crypto_gcm_decrypt_chunk
62 { NULL
, 0, 0, NULL
, NULL
, NULL
}
/*
 * OpenSSL id callback: return a per-thread unique identifier so the
 * library can tell threads apart when taking its internal locks.
 */
static
unsigned long
dmsg_crypto_id_callback(void)
{
	return ((unsigned long)(uintptr_t)pthread_self());
}
74 dmsg_crypto_locking_callback(int mode
, int type
,
75 const char *file __unused
, int line __unused
)
77 assert(type
>= 0 && type
< crypto_count
);
78 if (mode
& CRYPTO_LOCK
) {
79 pthread_mutex_lock(&crypto_locks
[type
]);
81 pthread_mutex_unlock(&crypto_locks
[type
]);
86 dmsg_crypto_setup(void)
88 crypto_count
= CRYPTO_num_locks();
89 crypto_locks
= calloc(crypto_count
, sizeof(crypto_locks
[0]));
90 CRYPTO_set_id_callback(dmsg_crypto_id_callback
);
91 CRYPTO_set_locking_callback(dmsg_crypto_locking_callback
);
96 dmsg_crypto_gcm_init(dmsg_ioq_t
*ioq
, char *key
, int klen
,
97 char *iv_fixed
, int ivlen
, int enc
)
101 if (klen
< DMSG_CRYPTO_GCM_KEY_SIZE
||
102 ivlen
< DMSG_CRYPTO_GCM_IV_FIXED_SIZE
) {
103 dm_printf(1, "%s\n", "Not enough key or iv material");
107 dm_printf(6, "%s key: ", enc
? "Encryption" : "Decryption");
108 for (i
= 0; i
< DMSG_CRYPTO_GCM_KEY_SIZE
; ++i
)
109 dmx_printf(6, "%02x", (unsigned char)key
[i
]);
110 dmx_printf(6, "%s\n", "");
112 dm_printf(6, "%s iv: ", enc
? "Encryption" : "Decryption");
113 for (i
= 0; i
< DMSG_CRYPTO_GCM_IV_FIXED_SIZE
; ++i
)
114 dmx_printf(6, "%02x", (unsigned char)iv_fixed
[i
]);
115 dmx_printf(6, "%s\n", " (fixed part only)");
117 EVP_CIPHER_CTX_init(&ioq
->ctx
);
120 ok
= EVP_EncryptInit_ex(&ioq
->ctx
, EVP_aes_256_gcm(), NULL
,
123 ok
= EVP_DecryptInit_ex(&ioq
->ctx
, EVP_aes_256_gcm(), NULL
,
129 * According to the original Galois/Counter Mode of Operation (GCM)
130 * proposal, only IVs that are exactly 96 bits get used without any
131 * further processing. Other IV sizes cause the GHASH() operation
132 * to be applied to the IV, which is more costly.
134 * The NIST SP 800-38D also recommends using a 96 bit IV for the same
135 * reasons. We actually follow the deterministic construction
136 * recommended in NIST SP 800-38D with a 64 bit invocation field as an
137 * integer counter and a random, session-specific fixed field.
139 * This means that we can essentially use the same session key and
140 * IV fixed field for up to 2^64 invocations of the authenticated
141 * encryption or decryption.
143 * With a chunk size of 64 bytes, this adds up to 1 zettabyte of
146 ok
= EVP_CIPHER_CTX_ctrl(&ioq
->ctx
, EVP_CTRL_GCM_SET_IVLEN
,
147 DMSG_CRYPTO_GCM_IV_SIZE
, NULL
);
151 memset(ioq
->iv
, 0, DMSG_CRYPTO_GCM_IV_SIZE
);
152 memcpy(ioq
->iv
, iv_fixed
, DMSG_CRYPTO_GCM_IV_FIXED_SIZE
);
155 * Strictly speaking, padding is irrelevant with a counter mode
158 * However, setting padding to 0, even if using a counter mode such
159 * as GCM, will cause an error in _finish if the pt/ct size is not
160 * a multiple of the cipher block size.
162 EVP_CIPHER_CTX_set_padding(&ioq
->ctx
, 0);
167 dm_printf(1, "%s\n", "Error during _gcm_init");
173 _gcm_iv_increment(char *iv
)
176 * Deterministic construction according to NIST SP 800-38D, with
177 * 64 bit invocation field as integer counter.
179 * In other words, our 96 bit IV consists of a 32 bit fixed field
180 * unique to the session and a 64 bit integer counter.
183 uint64_t *c
= (uint64_t *)(&iv
[DMSG_CRYPTO_GCM_IV_FIXED_SIZE
]);
185 /* Increment invocation field integer counter */
186 *c
= htobe64(be64toh(*c
)+1);
189 * Detect wrap-around, which means it is time to renegotiate
190 * the session to get a new key and/or fixed field.
192 return (*c
== 0) ? 0 : 1;
197 dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t
*ioq
, char *ct
, char *pt
,
198 int in_size
, int *out_size
)
205 /* Re-initialize with new IV (but without redoing the key schedule) */
206 ok
= EVP_EncryptInit_ex(&ioq
->ctx
, NULL
, NULL
, NULL
, ioq
->iv
);
210 u_len
= 0; /* safety */
211 ok
= EVP_EncryptUpdate(&ioq
->ctx
, ct
, &u_len
, pt
, in_size
);
215 f_len
= 0; /* safety */
216 ok
= EVP_EncryptFinal(&ioq
->ctx
, ct
+ u_len
, &f_len
);
220 /* Retrieve auth tag */
221 ok
= EVP_CIPHER_CTX_ctrl(&ioq
->ctx
, EVP_CTRL_GCM_GET_TAG
,
222 DMSG_CRYPTO_GCM_TAG_SIZE
,
227 ok
= _gcm_iv_increment(ioq
->iv
);
229 ioq
->error
= DMSG_IOQ_ERROR_IVWRAP
;
233 *out_size
= u_len
+ f_len
+ DMSG_CRYPTO_GCM_TAG_SIZE
;
238 ioq
->error
= DMSG_IOQ_ERROR_ALGO
;
240 dm_printf(1, "%s\n", "error during encrypt_chunk");
246 dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t
*ioq
, char *ct
, char *pt
,
247 int out_size
, int *consume_size
)
254 /* Re-initialize with new IV (but without redoing the key schedule) */
255 ok
= EVP_DecryptInit_ex(&ioq
->ctx
, NULL
, NULL
, NULL
, ioq
->iv
);
257 ioq
->error
= DMSG_IOQ_ERROR_ALGO
;
261 ok
= EVP_CIPHER_CTX_ctrl(&ioq
->ctx
, EVP_CTRL_GCM_SET_TAG
,
262 DMSG_CRYPTO_GCM_TAG_SIZE
,
265 ioq
->error
= DMSG_IOQ_ERROR_ALGO
;
269 ok
= EVP_DecryptUpdate(&ioq
->ctx
, pt
, &u_len
, ct
, out_size
);
273 ok
= EVP_DecryptFinal(&ioq
->ctx
, pt
+ u_len
, &f_len
);
277 ok
= _gcm_iv_increment(ioq
->iv
);
279 ioq
->error
= DMSG_IOQ_ERROR_IVWRAP
;
283 *consume_size
= u_len
+ f_len
+ DMSG_CRYPTO_GCM_TAG_SIZE
;
288 ioq
->error
= DMSG_IOQ_ERROR_MACFAIL
;
291 "error during decrypt_chunk "
292 "(likely authentication error)");
297 * Synchronously negotiate crypto for a new session. This must occur
298 * within 10 seconds or the connection is error'd out.
300 * We work off the IP address and/or reverse DNS. The IP address is
301 * checked first, followed by the IP address at various levels of granularity,
302 * followed by the full domain name and domain names at various levels of
305 * /etc/hammer2/remote/<name>.pub - Contains a public key
306 * /etc/hammer2/remote/<name>.none - Indicates no encryption (empty file)
307 * (e.g. localhost.none).
309 * We first attempt to locate a public key file based on the peer address or
312 * <name>.none - No further negotiation is needed. We simply return.
313 * All communication proceeds without encryption.
314 * No public key handshake occurs in this situation.
315 * (both ends must match).
317 * <name>.pub - We have located the public key for the peer. Both
318 * sides transmit a block encrypted with their private
319 * keys and the peer's public key.
321 * Both sides receive a block and decrypt it.
323 * Both sides formulate a reply using the decrypted
324 * block and transmit it.
326 * communication proceeds with the negotiated session
327 * key (typically AES-256-CBC).
329 * If we fail to locate the appropriate file and no floating.db exists the
330 * connection is terminated without further action.
332 * If floating.db exists the connection proceeds with a floating negotiation.
336 struct sockaddr_in sa_in
;
337 struct sockaddr_in6 sa_in6
;
341 dmsg_crypto_negotiate(dmsg_iocom_t
*iocom
)
344 socklen_t salen
= sizeof(sa
);
347 dmsg_handshake_t handtx
;
348 dmsg_handshake_t handrx
;
349 char buf1
[sizeof(handtx
)];
350 char buf2
[sizeof(handtx
)];
355 RSA
*keys
[3] = { NULL
, NULL
, NULL
};
364 * Get the peer IP address for the connection as a string.
366 if (getpeername(iocom
->sock_fd
, &sa
.sa
, &salen
) < 0) {
367 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_NOPEER
;
368 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
369 dm_printf(1, "%s\n", "accept: getpeername() failed");
372 if (getnameinfo(&sa
.sa
, salen
, peername
, sizeof(peername
),
373 NULL
, 0, NI_NUMERICHOST
) < 0) {
374 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_NOPEER
;
375 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
376 dm_printf(1, "%s\n", "accept: cannot decode sockaddr");
380 if (realhostname_sa(realname
, sizeof(realname
),
381 &sa
.sa
, salen
) == HOSTNAME_FOUND
) {
382 dm_printf(1, "accept from %s (%s)\n",
385 dm_printf(1, "accept from %s\n", peername
);
390 * Find the remote host's public key
392 * If the link is not to be encrypted (<ip>.none located) we shortcut
393 * the handshake entirely. No buffers are exchanged.
395 asprintf(&path
, "%s/%s.pub", DMSG_PATH_REMOTE
, peername
);
396 if ((fp
= fopen(path
, "r")) == NULL
) {
398 asprintf(&path
, "%s/%s.none",
399 DMSG_PATH_REMOTE
, peername
);
400 if (stat(path
, &st
) < 0) {
401 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_NORKEY
;
402 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
403 dm_printf(1, "%s\n", "auth failure: unknown host");
406 dm_printf(1, "%s\n", "auth succeeded, unencrypted link");
410 keys
[0] = PEM_read_RSA_PUBKEY(fp
, NULL
, NULL
, NULL
);
412 if (keys
[0] == NULL
) {
413 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_KEYFMT
;
414 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
415 dm_printf(1, "%s\n", "auth failure: bad key format");
421 * Get our public and private keys
424 asprintf(&path
, DMSG_DEFAULT_DIR
"/rsa.pub");
425 if ((fp
= fopen(path
, "r")) == NULL
) {
426 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_NOLKEY
;
427 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
430 keys
[1] = PEM_read_RSA_PUBKEY(fp
, NULL
, NULL
, NULL
);
432 if (keys
[1] == NULL
) {
433 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_KEYFMT
;
434 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
435 dm_printf(1, "%s\n", "auth failure: bad host key format");
440 asprintf(&path
, DMSG_DEFAULT_DIR
"/rsa.prv");
441 if ((fp
= fopen(path
, "r")) == NULL
) {
442 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_NOLKEY
;
443 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
444 dm_printf(1, "%s\n", "auth failure: bad host key format");
447 keys
[2] = PEM_read_RSAPrivateKey(fp
, NULL
, NULL
, NULL
);
449 if (keys
[2] == NULL
) {
450 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_KEYFMT
;
451 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
452 dm_printf(1, "%s\n", "auth failure: bad host key format");
459 * public key encrypt/decrypt block size.
462 blksize
= (size_t)RSA_size(keys
[0]);
463 if (blksize
!= (size_t)RSA_size(keys
[1]) ||
464 blksize
!= (size_t)RSA_size(keys
[2]) ||
465 sizeof(handtx
) % blksize
!= 0) {
466 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_KEYFMT
;
467 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
469 "auth failure: key size mismatch");
473 blksize
= sizeof(handtx
);
475 blkmask
= blksize
- 1;
477 bzero(&handrx
, sizeof(handrx
));
478 bzero(&handtx
, sizeof(handtx
));
481 * Fill all unused fields (particular all junk fields) with random
482 * data, and also set the session key.
484 fd
= open("/dev/urandom", O_RDONLY
);
486 fstat(fd
, &st
) < 0 || /* something wrong */
487 S_ISREG(st
.st_mode
) || /* supposed to be a RNG dev! */
488 read(fd
, &handtx
, sizeof(handtx
)) != sizeof(handtx
)) {
492 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_BADURANDOM
;
493 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
494 dm_printf(1, "%s\n", "auth failure: bad rng");
497 if (bcmp(&handrx
, &handtx
, sizeof(handtx
)) == 0)
498 goto urandfail
; /* read all zeros */
500 /* ERR_load_crypto_strings(); openssl debugging */
503 * Handshake with the remote.
505 * Encrypt with my private and remote's public
506 * Decrypt with my private and remote's public
508 * When encrypting we have to make sure our buffer fits within the
509 * modulus, which typically requires bit 7 o the first byte to be
510 * zero. To be safe make sure that bit 7 and bit 6 is zero.
512 snprintf(handtx
.quickmsg
, sizeof(handtx
.quickmsg
), "Testing 1 2 3");
513 handtx
.magic
= DMSG_HDR_MAGIC
;
516 assert(sizeof(handtx
.verf
) * 4 == sizeof(handtx
.sess
));
517 bzero(handtx
.verf
, sizeof(handtx
.verf
));
519 handtx
.pad1
[0] &= 0x3f; /* message must fit within modulus */
520 handtx
.pad2
[0] &= 0x3f; /* message must fit within modulus */
522 for (i
= 0; i
< sizeof(handtx
.sess
); ++i
)
523 handtx
.verf
[i
/ 4] ^= handtx
.sess
[i
];
526 * Write handshake buffer to remote
528 for (i
= 0; i
< sizeof(handtx
); i
+= blksize
) {
529 ptr
= (char *)&handtx
+ i
;
532 * Since we are double-encrypting we have to make
533 * sure that the result of the first stage does
534 * not blow out the modulus for the second stage.
536 * The pointer is pointing to the pad*[] area so
537 * we can mess with that until the first stage
542 if (RSA_private_encrypt(blksize
, ptr
, buf1
,
543 keys
[2], RSA_NO_PADDING
) < 0) {
544 iocom
->ioq_rx
.error
=
545 DMSG_IOQ_ERROR_KEYXCHGFAIL
;
547 } while (buf1
[0] & 0xC0);
549 if (RSA_public_encrypt(blksize
, buf1
, buf2
,
550 keys
[0], RSA_NO_PADDING
) < 0) {
551 iocom
->ioq_rx
.error
=
552 DMSG_IOQ_ERROR_KEYXCHGFAIL
;
555 if (write(iocom
->sock_fd
, buf2
, blksize
) != (ssize_t
)blksize
) {
556 dmio_printf(iocom
, 1, "%s\n", "WRITE ERROR");
559 if (iocom
->ioq_rx
.error
) {
560 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
561 dmio_printf(iocom
, 1, "%s\n",
562 "auth failure: key exchange failure "
563 "during encryption");
568 * Read handshake buffer from remote
571 while (i
< sizeof(handrx
)) {
572 ptr
= (char *)&handrx
+ i
;
573 n
= read(iocom
->sock_fd
, ptr
, blksize
- (i
& blkmask
));
576 ptr
-= (i
& blkmask
);
578 if (keys
[0] && (i
& blkmask
) == 0) {
579 if (RSA_private_decrypt(blksize
, ptr
, buf1
,
580 keys
[2], RSA_NO_PADDING
) < 0)
581 iocom
->ioq_rx
.error
=
582 DMSG_IOQ_ERROR_KEYXCHGFAIL
;
583 if (RSA_public_decrypt(blksize
, buf1
, ptr
,
584 keys
[0], RSA_NO_PADDING
) < 0)
585 iocom
->ioq_rx
.error
=
586 DMSG_IOQ_ERROR_KEYXCHGFAIL
;
589 if (iocom
->ioq_rx
.error
) {
590 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
591 dmio_printf(iocom
, 1, "%s\n",
592 "auth failure: key exchange failure "
593 "during decryption");
598 * Validate the received data. Try to make this a constant-time
601 if (i
!= sizeof(handrx
)) {
603 iocom
->ioq_rx
.error
= DMSG_IOQ_ERROR_KEYXCHGFAIL
;
604 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_EOF
);
605 dmio_printf(iocom
, 1, "%s\n",
606 "auth failure: key exchange failure");
610 if (handrx
.magic
== DMSG_HDR_MAGIC_REV
) {
611 handrx
.version
= bswap16(handrx
.version
);
612 handrx
.flags
= bswap32(handrx
.flags
);
614 for (i
= 0; i
< sizeof(handrx
.sess
); ++i
)
615 handrx
.verf
[i
/ 4] ^= handrx
.sess
[i
];
617 for (i
= 0; i
< sizeof(handrx
.verf
); ++i
)
619 if (handrx
.version
!= 1)
625 * Use separate session keys and session fixed IVs for receive and
628 error
= crypto_algos
[DMSG_CRYPTO_ALGO
].init(&iocom
->ioq_rx
, handrx
.sess
,
629 crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
630 handrx
.sess
+ crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
631 sizeof(handrx
.sess
) - crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
636 error
= crypto_algos
[DMSG_CRYPTO_ALGO
].init(&iocom
->ioq_tx
, handtx
.sess
,
637 crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
638 handtx
.sess
+ crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
639 sizeof(handtx
.sess
) - crypto_algos
[DMSG_CRYPTO_ALGO
].keylen
,
644 atomic_set_int(&iocom
->flags
, DMSG_IOCOMF_CRYPTED
);
646 dmio_printf(iocom
, 1, "auth success: %s\n", handrx
.quickmsg
);
659 * Decrypt pending data in the ioq's fifo. The data is decrypted in-place.
662 dmsg_crypto_decrypt(dmsg_iocom_t
*iocom __unused
, dmsg_ioq_t
*ioq
)
666 __unused
int error
; /* XXX */
670 * fifo_beg to fifo_cdx is data already decrypted.
671 * fifo_cdn to fifo_end is data not yet decrypted.
673 p_len
= ioq
->fifo_end
- ioq
->fifo_cdn
; /* data not yet decrypted */
678 while (p_len
>= crypto_algos
[DMSG_CRYPTO_ALGO
].taglen
+
679 DMSG_CRYPTO_CHUNK_SIZE
) {
680 bcopy(ioq
->buf
+ ioq
->fifo_cdn
, buf
,
681 crypto_algos
[DMSG_CRYPTO_ALGO
].taglen
+
682 DMSG_CRYPTO_CHUNK_SIZE
);
683 error
= crypto_algos
[DMSG_CRYPTO_ALGO
].dec_chunk(
685 ioq
->buf
+ ioq
->fifo_cdx
,
686 DMSG_CRYPTO_CHUNK_SIZE
,
689 dmio_printf(iocom
, 5,
690 "dec: p_len: %d, used: %d, "
691 "fifo_cdn: %ju, fifo_cdx: %ju\n",
693 ioq
->fifo_cdn
, ioq
->fifo_cdx
);
696 ioq
->fifo_cdn
+= used
;
697 ioq
->fifo_cdx
+= DMSG_CRYPTO_CHUNK_SIZE
;
699 dmio_printf(iocom
, 5,
700 "dec: p_len: %d, used: %d, "
701 "fifo_cdn: %ju, fifo_cdx: %ju\n",
702 p_len
, used
, ioq
->fifo_cdn
, ioq
->fifo_cdx
);
708 * *nactp is set to the number of ORIGINAL bytes consumed by the encrypter.
709 * The FIFO may contain more data.
712 dmsg_crypto_encrypt(dmsg_iocom_t
*iocom __unused
, dmsg_ioq_t
*ioq
,
713 struct iovec
*iov
, int n
, size_t *nactp
)
715 int p_len
, used
, ct_used
;
717 __unused
int error
; /* XXX */
720 nmax
= sizeof(ioq
->buf
) - ioq
->fifo_end
; /* max new bytes */
723 for (i
= 0; i
< n
&& nmax
; ++i
) {
725 p_len
= iov
[i
].iov_len
;
726 assert((p_len
& DMSG_ALIGNMASK
) == 0);
728 while (p_len
>= DMSG_CRYPTO_CHUNK_SIZE
&&
729 nmax
>= DMSG_CRYPTO_CHUNK_SIZE
+
730 (size_t)crypto_algos
[DMSG_CRYPTO_ALGO
].taglen
) {
731 error
= crypto_algos
[DMSG_CRYPTO_ALGO
].enc_chunk(
733 ioq
->buf
+ ioq
->fifo_cdx
,
734 (char *)iov
[i
].iov_base
+ used
,
735 DMSG_CRYPTO_CHUNK_SIZE
, &ct_used
);
737 dmio_printf(iocom
, 5,
738 "nactp: %ju, p_len: %d, "
739 "ct_used: %d, used: %d, nmax: %ju\n",
740 *nactp
, p_len
, ct_used
, used
, nmax
);
743 *nactp
+= (size_t)DMSG_CRYPTO_CHUNK_SIZE
; /* plaintext count */
744 used
+= DMSG_CRYPTO_CHUNK_SIZE
;
745 p_len
-= DMSG_CRYPTO_CHUNK_SIZE
;
748 * NOTE: crypted count will eventually differ from
749 * nmax, but for now we have not yet introduced
752 ioq
->fifo_cdx
+= (size_t)ct_used
;
753 ioq
->fifo_cdn
+= (size_t)ct_used
;
754 ioq
->fifo_end
+= (size_t)ct_used
;
755 nmax
-= (size_t)ct_used
;
757 dmio_printf(iocom
, 5,
758 "nactp: %ju, p_len: %d, "
759 "ct_used: %d, used: %d, nmax: %ju\n",
760 *nactp
, p_len
, ct_used
, used
, nmax
);
764 iov
[0].iov_base
= ioq
->buf
+ ioq
->fifo_beg
;
765 iov
[0].iov_len
= ioq
->fifo_cdx
- ioq
->fifo_beg
;