/*
 * ASPEED Hash and Crypto Engine
 *
 * Copyright (C) 2021 IBM Corp.
 *
 * Joel Stanley <joel@jms.id.au>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/misc/aspeed_hace.h"
#include "qapi/error.h"
#include "migration/vmstate.h"
#include "crypto/hash.h"
#include "hw/qdev-properties.h"
#include "hw/irq.h"
#define R_CRYPT_CMD (0x10 / 4)

#define R_STATUS (0x1c / 4)
#define HASH_IRQ BIT(9)
#define CRYPT_IRQ BIT(12)
#define TAG_IRQ BIT(15)

#define R_HASH_SRC (0x20 / 4)
#define R_HASH_DEST (0x24 / 4)
#define R_HASH_KEY_BUFF (0x28 / 4)
#define R_HASH_SRC_LEN (0x2c / 4)

#define R_HASH_CMD (0x30 / 4)
/* Hash algorithm selection */
#define HASH_ALGO_MASK (BIT(4) | BIT(5) | BIT(6))
#define HASH_ALGO_MD5 0
#define HASH_ALGO_SHA1 BIT(5)
#define HASH_ALGO_SHA224 BIT(6)
#define HASH_ALGO_SHA256 (BIT(4) | BIT(6))
#define HASH_ALGO_SHA512_SERIES (BIT(5) | BIT(6))
/* SHA512 algorithm selection */
#define SHA512_HASH_ALGO_MASK (BIT(10) | BIT(11) | BIT(12))
#define HASH_ALGO_SHA512_SHA512 0
#define HASH_ALGO_SHA512_SHA384 BIT(10)
#define HASH_ALGO_SHA512_SHA256 BIT(11)
#define HASH_ALGO_SHA512_SHA224 (BIT(10) | BIT(11))
/* HMAC modes */
#define HASH_HMAC_MASK (BIT(7) | BIT(8))
#define HASH_DIGEST 0
#define HASH_DIGEST_HMAC BIT(7)
#define HASH_DIGEST_ACCUM BIT(8)
#define HASH_HMAC_KEY (BIT(7) | BIT(8))
/* Cascaded operation modes */
#define HASH_ONLY 0
#define HASH_ONLY2 BIT(0)
#define HASH_CRYPT_THEN_HASH BIT(1)
#define HASH_HASH_THEN_CRYPT (BIT(0) | BIT(1))

#define HASH_IRQ_EN BIT(9)
#define HASH_SG_EN BIT(18)
/* Scatter-gather data list */
#define SG_LIST_LEN_SIZE 4
#define SG_LIST_LEN_MASK 0x0FFFFFFF
#define SG_LIST_LEN_LAST BIT(31)
#define SG_LIST_ADDR_SIZE 4
#define SG_LIST_ADDR_MASK 0x7FFFFFFF
#define SG_LIST_ENTRY_SIZE (SG_LIST_LEN_SIZE + SG_LIST_ADDR_SIZE)
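
/*
 * Each scatter-gather list entry in guest memory is two little-endian
 * 32-bit words: a length word (bits 27:0 are the buffer length, bit 31
 * marks the last entry) followed by a buffer address word (bits 30:0).
 */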
static const struct {
    uint32_t mask;
    QCryptoHashAlgorithm algo;
} hash_algo_map[] = {
    { HASH_ALGO_MD5, QCRYPTO_HASH_ALG_MD5 },
    { HASH_ALGO_SHA1, QCRYPTO_HASH_ALG_SHA1 },
    { HASH_ALGO_SHA224, QCRYPTO_HASH_ALG_SHA224 },
    { HASH_ALGO_SHA256, QCRYPTO_HASH_ALG_SHA256 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA512,
      QCRYPTO_HASH_ALG_SHA512 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA384,
      QCRYPTO_HASH_ALG_SHA384 },
    { HASH_ALGO_SHA512_SERIES | HASH_ALGO_SHA512_SHA256,
      QCRYPTO_HASH_ALG_SHA256 },
};
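
/*
 * Mask off everything but the algorithm selection bits and require an
 * exact match against the table above; -1 is returned when the guest
 * programs an unsupported combination.
 */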
static int hash_algo_lookup(uint32_t reg)
{
    int i;

    reg &= HASH_ALGO_MASK | SHA512_HASH_ALGO_MASK;

    for (i = 0; i < ARRAY_SIZE(hash_algo_map); i++) {
        if (reg == hash_algo_map[i].mask) {
            return hash_algo_map[i].algo;
        }
    }

    return -1;
}
/**
 * Check whether the request contains a padding message.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param req_len length of the current request
 * @param total_msg_len length of all acc_mode requests (excluding padding)
 * @param pad_offset start offset of the padding message
 */
static bool has_padding(AspeedHACEState *s, struct iovec *iov,
                        hwaddr req_len, uint32_t *total_msg_len,
                        uint32_t *pad_offset)
{
    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
    /*
     * SG_LIST_LEN_LAST asserted in the request length does not mean it is
     * the last request; the last request is the one carrying the padding
     * message. We detect padding as follows:
     *   1. Get the total message length. If the current request contains
     *      padding, its last 8 bytes hold the total message length in bits
     *      (SHA-family padding ends with a big-endian 64-bit bit count,
     *      hence the division by 8 above).
     *   2. Check that the total message length is valid, i.e. less than
     *      or equal to total_req_len.
     *   3. Subtract the padding size from the current request length to
     *      get the padding offset. The padding message's first byte must
     *      be 0x80.
     */
    if (*total_msg_len <= s->total_req_len) {
        uint32_t padding_size = s->total_req_len - *total_msg_len;
        uint8_t *padding = iov->iov_base;
        *pad_offset = req_len - padding_size;
        if (padding[*pad_offset] == 0x80) {
            return true;
        }
    }

    return false;
}
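
/*
 * Flush the cached iovecs, plus the unpadded head of the current request,
 * back into the caller's iov array and reset the accumulation state.
 * Returns the number of valid iov entries.
 */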
static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
                           uint32_t *pad_offset)
{
    int i, iov_count;

    if (*pad_offset != 0) {
        s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
        s->iov_cache[s->iov_count].iov_len = *pad_offset;
        ++s->iov_count;
    }

    for (i = 0; i < s->iov_count; i++) {
        iov[i].iov_base = s->iov_cache[i].iov_base;
        iov[i].iov_len = s->iov_cache[i].iov_len;
    }

    iov_count = s->iov_count;
    s->iov_count = 0;
    s->total_req_len = 0;

    return iov_count;
}
/**
 * Generate iov for accumulative mode.
 *
 * @param s aspeed hace state object
 * @param iov iov of the current request
 * @param id index of the current iov
 * @param req_len length of the current request
 *
 * @return count of iov
 */
static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
                            hwaddr *req_len)
{
    uint32_t pad_offset;
    uint32_t total_msg_len;
    s->total_req_len += *req_len;

    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
        if (s->iov_count) {
            return reconstruct_iov(s, iov, id, &pad_offset);
        }

        /* Nothing cached: just trim the padding off the current request. */
        *req_len -= s->total_req_len - total_msg_len;
        s->total_req_len = 0;
        iov[id].iov_len = *req_len;
    } else {
        /* No padding yet: cache this chunk until the final request. */
        s->iov_cache[s->iov_count].iov_base = iov->iov_base;
        s->iov_cache[s->iov_count].iov_len = *req_len;
        ++s->iov_count;
    }

    return id + 1;
}
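
/**
 * Run the hash over the guest source data and write the digest back to
 * guest memory at R_HASH_DEST.
 *
 * @param s aspeed hace state object
 * @param algo QCryptoHashAlgorithm to apply
 * @param sg_mode true when the source is a scatter-gather list
 * @param acc_mode true when the guest accumulates the message over several
 *                 requests; chunks are cached and padding is stripped
 *                 before hashing
 */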
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                              bool acc_mode)
{
    struct iovec iov[ASPEED_HACE_MAX_SG];
    g_autofree uint8_t *digest_buf = NULL;
    size_t digest_len = 0;
    int niov = 0;
    int i;
    void *haddr;

    if (sg_mode) {
        uint32_t len = 0;

        for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
            uint32_t addr, src;
            hwaddr plen;

            if (i == ASPEED_HACE_MAX_SG) {
                qemu_log_mask(LOG_GUEST_ERROR,
                        "aspeed_hace: guest failed to set end of sg list marker\n");
                break;
            }

            src = s->regs[R_HASH_SRC] + (i * SG_LIST_ENTRY_SIZE);

            len = address_space_ldl_le(&s->dram_as, src,
                                       MEMTXATTRS_UNSPECIFIED, NULL);

            addr = address_space_ldl_le(&s->dram_as, src + SG_LIST_LEN_SIZE,
                                        MEMTXATTRS_UNSPECIFIED, NULL);
            addr &= SG_LIST_ADDR_MASK;

            plen = len & SG_LIST_LEN_MASK;
            haddr = address_space_map(&s->dram_as, addr, &plen, false,
                                      MEMTXATTRS_UNSPECIFIED);
            if (haddr == NULL) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: failed to map dram\n", __func__);
                return;
            }
            iov[i].iov_base = haddr;
            if (acc_mode) {
                niov = gen_acc_mode_iov(s, iov, i, &plen);
            } else {
                iov[i].iov_len = plen;
            }
        }
    } else {
        hwaddr len = s->regs[R_HASH_SRC_LEN];

        haddr = address_space_map(&s->dram_as, s->regs[R_HASH_SRC],
                                  &len, false, MEMTXATTRS_UNSPECIFIED);
        if (haddr == NULL) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to map dram\n", __func__);
            return;
        }
        iov[0].iov_base = haddr;
        iov[0].iov_len = len;
        i = 1;

        if (s->iov_count) {
            /*
             * In the aspeed sdk kernel driver, sg_mode is disabled in
             * hash_final(). Thus, when we receive a request with sg_mode
             * disabled, we must check whether the cache is empty; if it
             * is not, we combine the cached iov with the current one.
             */
            uint32_t total_msg_len;
            uint32_t pad_offset;
            s->total_req_len += len;
            if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
                niov = reconstruct_iov(s, iov, 0, &pad_offset);
            }
        }
    }

    if (niov) {
        i = niov;
    }

    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
        return;
    }

    if (address_space_write(&s->dram_as, s->regs[R_HASH_DEST],
                            MEMTXATTRS_UNSPECIFIED,
                            digest_buf, digest_len)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "aspeed_hace: address space write failed\n");
    }

    for (; i > 0; i--) {
        address_space_unmap(&s->dram_as, iov[i - 1].iov_base,
                            iov[i - 1].iov_len, false,
                            iov[i - 1].iov_len);
    }

    /*
     * Set status bits to indicate completion. Testing shows hardware sets
     * these irrespective of HASH_IRQ_EN.
     */
    s->regs[R_STATUS] |= HASH_IRQ;
}
static uint64_t aspeed_hace_read(void *opaque, hwaddr addr, unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds read at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return 0;
    }

    return s->regs[addr];
}
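
/*
 * Register write handling: the HASH_IRQ bit of R_STATUS is
 * write-one-to-clear and lowers the interrupt line, while a write to
 * R_HASH_CMD kicks off the hash operation described by the command bits.
 */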
static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
{
    AspeedHACEState *s = ASPEED_HACE(opaque);
    AspeedHACEClass *ahc = ASPEED_HACE_GET_CLASS(s);

    addr >>= 2;

    if (addr >= ASPEED_HACE_NR_REGS) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n",
                      __func__, addr << 2);
        return;
    }

    switch (addr) {
    case R_STATUS:
        if (data & HASH_IRQ) {
            data &= ~HASH_IRQ;

            if (s->regs[addr] & HASH_IRQ) {
                qemu_irq_lower(s->irq);
            }
        }
        break;
    case R_HASH_SRC:
        data &= ahc->src_mask;
        break;
    case R_HASH_DEST:
        data &= ahc->dest_mask;
        break;
    case R_HASH_KEY_BUFF:
        data &= ahc->key_mask;
        break;
    case R_HASH_SRC_LEN:
        data &= 0x0FFFFFFF;
        break;
    case R_HASH_CMD: {
        int algo;
        data &= ahc->hash_mask;

        if ((data & HASH_DIGEST_HMAC)) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: HMAC mode not implemented\n",
                          __func__);
        }
        if (data & HASH_CRYPT_THEN_HASH) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Cascaded mode not implemented\n",
                          __func__);
        }
        algo = hash_algo_lookup(data);
        if (algo < 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
                          __func__, data & ahc->hash_mask);
            break;
        }
        do_hash_operation(s, algo, data & HASH_SG_EN,
                          ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));

        if (data & HASH_IRQ_EN) {
            qemu_irq_raise(s->irq);
        }
        break;
    }
    case R_CRYPT_CMD:
        qemu_log_mask(LOG_UNIMP, "%s: Crypt commands not implemented\n",
                      __func__);
        break;
    default:
        break;
    }

    s->regs[addr] = data;
}
static const MemoryRegionOps aspeed_hace_ops = {
    .read = aspeed_hace_read,
    .write = aspeed_hace_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
static void aspeed_hace_reset(DeviceState *dev)
{
    struct AspeedHACEState *s = ASPEED_HACE(dev);

    memset(s->regs, 0, sizeof(s->regs));
    s->iov_count = 0;
    s->total_req_len = 0;
}
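
/* The 'dram' link property must be set by the SoC before realize. */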
static void aspeed_hace_realize(DeviceState *dev, Error **errp)
{
    AspeedHACEState *s = ASPEED_HACE(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    sysbus_init_irq(sbd, &s->irq);

    memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_hace_ops, s,
                          TYPE_ASPEED_HACE, 0x1000);

    if (!s->dram_mr) {
        error_setg(errp, TYPE_ASPEED_HACE ": 'dram' link not set");
        return;
    }

    address_space_init(&s->dram_as, s->dram_mr, "dram");

    sysbus_init_mmio(sbd, &s->iomem);
}
static Property aspeed_hace_properties[] = {
    DEFINE_PROP_LINK("dram", AspeedHACEState, dram_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
static const VMStateDescription vmstate_aspeed_hace = {
    .name = TYPE_ASPEED_HACE,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
        VMSTATE_UINT32(total_req_len, AspeedHACEState),
        VMSTATE_UINT32(iov_count, AspeedHACEState),
        VMSTATE_END_OF_LIST(),
    }
};
static void aspeed_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = aspeed_hace_realize;
    dc->reset = aspeed_hace_reset;
    device_class_set_props(dc, aspeed_hace_properties);
    dc->vmsd = &vmstate_aspeed_hace;
}
static const TypeInfo aspeed_hace_info = {
    .name = TYPE_ASPEED_HACE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedHACEState),
    .class_init = aspeed_hace_class_init,
    .class_size = sizeof(AspeedHACEClass),
};
static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2400 Hash and Crypto Engine";

    ahc->src_mask = 0x0FFFFFFF;
    ahc->dest_mask = 0x0FFFFFF8;
    ahc->key_mask = 0x0FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}
static const TypeInfo aspeed_ast2400_hace_info = {
    .name = TYPE_ASPEED_AST2400_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2400_hace_class_init,
};
static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2500 Hash and Crypto Engine";

    ahc->src_mask = 0x3fffffff;
    ahc->dest_mask = 0x3ffffff8;
    ahc->key_mask = 0x3FFFFFC0;
    ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
}
static const TypeInfo aspeed_ast2500_hace_info = {
    .name = TYPE_ASPEED_AST2500_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2500_hace_class_init,
};
static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST2600 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}
static const TypeInfo aspeed_ast2600_hace_info = {
    .name = TYPE_ASPEED_AST2600_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast2600_hace_class_init,
};
static void aspeed_ast1030_hace_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    AspeedHACEClass *ahc = ASPEED_HACE_CLASS(klass);

    dc->desc = "AST1030 Hash and Crypto Engine";

    ahc->src_mask = 0x7FFFFFFF;
    ahc->dest_mask = 0x7FFFFFF8;
    ahc->key_mask = 0x7FFFFFF8;
    ahc->hash_mask = 0x00147FFF;
}
static const TypeInfo aspeed_ast1030_hace_info = {
    .name = TYPE_ASPEED_AST1030_HACE,
    .parent = TYPE_ASPEED_HACE,
    .class_init = aspeed_ast1030_hace_class_init,
};
static void aspeed_hace_register_types(void)
{
    type_register_static(&aspeed_ast2400_hace_info);
    type_register_static(&aspeed_ast2500_hace_info);
    type_register_static(&aspeed_ast2600_hace_info);
    type_register_static(&aspeed_ast1030_hace_info);
    type_register_static(&aspeed_hace_info);
}

type_init(aspeed_hace_register_types);