/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
 */
31 #include <security/cryptoki.h>
34 #include <sys/debug.h>
35 #include <sys/types.h>
36 #include <modes/modes.h>
37 #include <sys/crypto/common.h>
38 #include <sys/crypto/impl.h>
39 #include <aes/aes_impl.h>
/* These are the CMAC Rb constants from NIST SP 800-38B */
#define	CONST_RB_128	0x87
#define	CONST_RB_64	0x1B
46 * Algorithm independent CBC functions.
49 cbc_encrypt_contiguous_blocks(cbc_ctx_t
*ctx
, char *data
, size_t length
,
50 crypto_data_t
*out
, size_t block_size
,
51 int (*encrypt
)(const void *, const uint8_t *, uint8_t *),
52 void (*copy_block
)(uint8_t *, uint8_t *),
53 void (*xor_block
)(uint8_t *, uint8_t *))
55 size_t remainder
= length
;
57 uint8_t *datap
= (uint8_t *)data
;
64 size_t out_data_1_len
;
66 if (length
+ ctx
->cbc_remainder_len
< ctx
->max_remain
) {
67 /* accumulate bytes here and return */
69 (uint8_t *)ctx
->cbc_remainder
+ ctx
->cbc_remainder_len
,
71 ctx
->cbc_remainder_len
+= length
;
72 ctx
->cbc_copy_to
= datap
;
73 return (CRYPTO_SUCCESS
);
76 lastp
= (uint8_t *)ctx
->cbc_iv
;
78 crypto_init_ptrs(out
, &iov_or_mp
, &offset
);
81 /* Unprocessed data from last call. */
82 if (ctx
->cbc_remainder_len
> 0) {
83 need
= block_size
- ctx
->cbc_remainder_len
;
86 return (CRYPTO_DATA_LEN_RANGE
);
88 bcopy(datap
, &((uint8_t *)ctx
->cbc_remainder
)
89 [ctx
->cbc_remainder_len
], need
);
91 blockp
= (uint8_t *)ctx
->cbc_remainder
;
98 * XOR the previous cipher block or IV with the
99 * current clear block.
101 xor_block(lastp
, blockp
);
102 encrypt(ctx
->cbc_keysched
, blockp
, blockp
);
104 ctx
->cbc_lastp
= blockp
;
107 if ((ctx
->cbc_flags
& CMAC_MODE
) == 0 &&
108 ctx
->cbc_remainder_len
> 0) {
109 bcopy(blockp
, ctx
->cbc_copy_to
,
110 ctx
->cbc_remainder_len
);
111 bcopy(blockp
+ ctx
->cbc_remainder_len
, datap
,
116 * XOR the previous cipher block or IV with the
117 * current clear block.
119 xor_block(blockp
, lastp
);
120 encrypt(ctx
->cbc_keysched
, lastp
, lastp
);
123 * CMAC doesn't output until encrypt_final
125 if ((ctx
->cbc_flags
& CMAC_MODE
) == 0) {
126 crypto_get_ptrs(out
, &iov_or_mp
, &offset
,
127 &out_data_1
, &out_data_1_len
,
128 &out_data_2
, block_size
);
130 /* copy block to where it belongs */
131 if (out_data_1_len
== block_size
) {
132 copy_block(lastp
, out_data_1
);
134 bcopy(lastp
, out_data_1
,
136 if (out_data_2
!= NULL
) {
137 bcopy(lastp
+ out_data_1_len
,
144 out
->cd_offset
+= block_size
;
148 /* Update pointer to next block of data to be processed. */
149 if (ctx
->cbc_remainder_len
!= 0) {
151 ctx
->cbc_remainder_len
= 0;
156 remainder
= (size_t)&data
[length
] - (size_t)datap
;
158 /* Incomplete last block. */
159 if (remainder
> 0 && remainder
< ctx
->max_remain
) {
160 bcopy(datap
, ctx
->cbc_remainder
, remainder
);
161 ctx
->cbc_remainder_len
= remainder
;
162 ctx
->cbc_copy_to
= datap
;
165 ctx
->cbc_copy_to
= NULL
;
167 } while (remainder
> 0);
171 * Save the last encrypted block in the context.
173 if (ctx
->cbc_lastp
!= NULL
) {
174 copy_block((uint8_t *)ctx
->cbc_lastp
, (uint8_t *)ctx
->cbc_iv
);
175 ctx
->cbc_lastp
= (uint8_t *)ctx
->cbc_iv
;
178 return (CRYPTO_SUCCESS
);
/*
 * Given one of the two block-sized scratch buffers in the context
 * (cbc_lastblock or cbc_iv), evaluate to the other one.  Decrypt uses
 * this to ping-pong between them so the previous ciphertext block is
 * preserved while the current block is decrypted in place.
 */
#define	OTHER(a, ctx) \
	(((a) == (ctx)->cbc_lastblock) ? (ctx)->cbc_iv : (ctx)->cbc_lastblock)
186 cbc_decrypt_contiguous_blocks(cbc_ctx_t
*ctx
, char *data
, size_t length
,
187 crypto_data_t
*out
, size_t block_size
,
188 int (*decrypt
)(const void *, const uint8_t *, uint8_t *),
189 void (*copy_block
)(uint8_t *, uint8_t *),
190 void (*xor_block
)(uint8_t *, uint8_t *))
192 size_t remainder
= length
;
194 uint8_t *datap
= (uint8_t *)data
;
201 size_t out_data_1_len
;
203 if (length
+ ctx
->cbc_remainder_len
< block_size
) {
204 /* accumulate bytes here and return */
206 (uint8_t *)ctx
->cbc_remainder
+ ctx
->cbc_remainder_len
,
208 ctx
->cbc_remainder_len
+= length
;
209 ctx
->cbc_copy_to
= datap
;
210 return (CRYPTO_SUCCESS
);
213 lastp
= ctx
->cbc_lastp
;
215 crypto_init_ptrs(out
, &iov_or_mp
, &offset
);
218 /* Unprocessed data from last call. */
219 if (ctx
->cbc_remainder_len
> 0) {
220 need
= block_size
- ctx
->cbc_remainder_len
;
222 if (need
> remainder
)
223 return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE
);
225 bcopy(datap
, &((uint8_t *)ctx
->cbc_remainder
)
226 [ctx
->cbc_remainder_len
], need
);
228 blockp
= (uint8_t *)ctx
->cbc_remainder
;
233 /* LINTED: pointer alignment */
234 copy_block(blockp
, (uint8_t *)OTHER((uint64_t *)lastp
, ctx
));
237 decrypt(ctx
->cbc_keysched
, blockp
,
238 (uint8_t *)ctx
->cbc_remainder
);
239 blockp
= (uint8_t *)ctx
->cbc_remainder
;
241 decrypt(ctx
->cbc_keysched
, blockp
, blockp
);
245 * XOR the previous cipher block or IV with the
246 * currently decrypted block.
248 xor_block(lastp
, blockp
);
250 /* LINTED: pointer alignment */
251 lastp
= (uint8_t *)OTHER((uint64_t *)lastp
, ctx
);
254 crypto_get_ptrs(out
, &iov_or_mp
, &offset
, &out_data_1
,
255 &out_data_1_len
, &out_data_2
, block_size
);
257 bcopy(blockp
, out_data_1
, out_data_1_len
);
258 if (out_data_2
!= NULL
) {
259 bcopy(blockp
+ out_data_1_len
, out_data_2
,
260 block_size
- out_data_1_len
);
264 out
->cd_offset
+= block_size
;
266 } else if (ctx
->cbc_remainder_len
> 0) {
267 /* copy temporary block to where it belongs */
268 bcopy(blockp
, ctx
->cbc_copy_to
, ctx
->cbc_remainder_len
);
269 bcopy(blockp
+ ctx
->cbc_remainder_len
, datap
, need
);
272 /* Update pointer to next block of data to be processed. */
273 if (ctx
->cbc_remainder_len
!= 0) {
275 ctx
->cbc_remainder_len
= 0;
280 remainder
= (size_t)&data
[length
] - (size_t)datap
;
282 /* Incomplete last block. */
283 if (remainder
> 0 && remainder
< block_size
) {
284 bcopy(datap
, ctx
->cbc_remainder
, remainder
);
285 ctx
->cbc_remainder_len
= remainder
;
286 ctx
->cbc_lastp
= lastp
;
287 ctx
->cbc_copy_to
= datap
;
288 return (CRYPTO_SUCCESS
);
290 ctx
->cbc_copy_to
= NULL
;
292 } while (remainder
> 0);
294 ctx
->cbc_lastp
= lastp
;
295 return (CRYPTO_SUCCESS
);
299 cbc_init_ctx(cbc_ctx_t
*cbc_ctx
, char *param
, size_t param_len
,
300 size_t block_size
, void (*copy_block
)(uint8_t *, uint64_t *))
303 * Copy IV into context.
305 * If cm_param == NULL then the IV comes from the
306 * cd_miscdata field in the crypto_data structure.
310 ASSERT(param_len
== block_size
);
312 assert(param_len
== block_size
);
314 copy_block((uchar_t
*)param
, cbc_ctx
->cbc_iv
);
317 cbc_ctx
->cbc_lastp
= (uint8_t *)&cbc_ctx
->cbc_iv
[0];
318 cbc_ctx
->cbc_flags
|= CBC_MODE
;
319 cbc_ctx
->max_remain
= block_size
;
320 return (CRYPTO_SUCCESS
);
325 cbc_cmac_alloc_ctx(int kmflag
, uint32_t mode
)
328 uint32_t modeval
= mode
& (CBC_MODE
|CMAC_MODE
);
330 /* Only one of the two modes can be set */
331 VERIFY(modeval
== CBC_MODE
|| modeval
== CMAC_MODE
);
334 if ((cbc_ctx
= kmem_zalloc(sizeof (cbc_ctx_t
), kmflag
)) == NULL
)
336 if ((cbc_ctx
= calloc(1, sizeof (cbc_ctx_t
))) == NULL
)
340 cbc_ctx
->cbc_flags
= mode
;
345 cbc_alloc_ctx(int kmflag
)
347 return (cbc_cmac_alloc_ctx(kmflag
, CBC_MODE
));
351 * Algorithms for supporting AES-CMAC
352 * NOTE: CMAC is generally just a wrapper for CBC
356 cmac_alloc_ctx(int kmflag
)
358 return (cbc_cmac_alloc_ctx(kmflag
, CMAC_MODE
));
363 * Typically max_remain is set to block_size - 1, since we usually
364 * will process the data once we have a full block. However with CMAC,
365 * we must preprocess the final block of data. Since we cannot know
366 * when we've received the final block of data until the _final() method
367 * is called, we must not process the last block of data until we know
368 * it is the last block, or we receive a new block of data. As such,
369 * max_remain for CMAC is block_size + 1.
372 cmac_init_ctx(cbc_ctx_t
*cbc_ctx
, size_t block_size
)
375 * CMAC is only approved for block sizes 64 and 128 bits /
379 if (block_size
!= 16 && block_size
!= 8)
380 return (CRYPTO_INVALID_CONTEXT
);
383 * For CMAC, cbc_iv is always 0.
386 cbc_ctx
->cbc_iv
[0] = 0;
387 cbc_ctx
->cbc_iv
[1] = 0;
389 cbc_ctx
->cbc_lastp
= (uint8_t *)&cbc_ctx
->cbc_iv
[0];
390 cbc_ctx
->cbc_flags
|= CMAC_MODE
;
392 cbc_ctx
->max_remain
= block_size
+ 1;
393 return (CRYPTO_SUCCESS
);
/*
 * Left shifts blocks by one and returns the leftmost bit
 */
static uint8_t
cmac_left_shift_block_by1(uint8_t *block, size_t block_size)
{
	uint8_t carry = 0, old;
	size_t i;

	/*
	 * Walk from the least significant (last) byte to the most
	 * significant, carrying the top bit of each byte into the
	 * next one up.
	 */
	for (i = block_size; i > 0; i--) {
		old = carry;
		carry = (block[i - 1] & 0x80) ? 1 : 0;
		block[i - 1] = (block[i - 1] << 1) | old;
	}

	return (carry);
}
413 * Generate subkeys to preprocess the last block according to RFC 4493.
414 * Store the final block_size MAC generated in 'out'.
417 cmac_mode_final(cbc_ctx_t
*cbc_ctx
, crypto_data_t
*out
,
418 int (*encrypt_block
)(const void *, const uint8_t *, uint8_t *),
419 void (*xor_block
)(uint8_t *, uint8_t *))
421 uint8_t buf
[AES_BLOCK_LEN
] = {0};
422 uint8_t *M_last
= (uint8_t *)cbc_ctx
->cbc_remainder
;
423 size_t length
= cbc_ctx
->cbc_remainder_len
;
424 size_t block_size
= cbc_ctx
->max_remain
- 1;
427 if (length
> block_size
)
428 return (CRYPTO_INVALID_CONTEXT
);
430 if (out
->cd_length
< block_size
)
431 return (CRYPTO_DATA_LEN_RANGE
);
433 if (block_size
== 16)
434 const_rb
= CONST_RB_128
;
435 else if (block_size
== 8)
436 const_rb
= CONST_RB_64
;
438 return (CRYPTO_INVALID_CONTEXT
);
441 encrypt_block(cbc_ctx
->cbc_keysched
, buf
, buf
);
443 if (cmac_left_shift_block_by1(buf
, block_size
))
444 buf
[block_size
- 1] ^= const_rb
;
446 if (length
== block_size
) {
447 /* Last block complete, so m_n = k_1 + m_n' */
448 xor_block(buf
, M_last
);
449 xor_block(cbc_ctx
->cbc_lastp
, M_last
);
450 encrypt_block(cbc_ctx
->cbc_keysched
, M_last
, M_last
);
452 /* Last block incomplete, so m_n = k_2 + (m_n' | 100...0_bin) */
453 if (cmac_left_shift_block_by1(buf
, block_size
))
454 buf
[block_size
- 1] ^= const_rb
;
456 M_last
[length
] = 0x80;
457 bzero(M_last
+ length
+ 1, block_size
- length
- 1);
458 xor_block(buf
, M_last
);
459 xor_block(cbc_ctx
->cbc_lastp
, M_last
);
460 encrypt_block(cbc_ctx
->cbc_keysched
, M_last
, M_last
);
464 * zero out the sub-key.
467 explicit_bzero(&buf
, sizeof (buf
));
469 bzero(&buf
, sizeof (buf
));
471 return (crypto_put_output_data(M_last
, out
, block_size
));