[qemu/ar7.git] hw/tpm/tpm_spapr.c
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual TPM
 *
 * Copyright (c) 2015, 2017, 2019 IBM Corporation.
 *
 * Authors:
 *  Stefan Berger <stefanb@linux.vnet.ibm.com>
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"

#include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h"
#include "tpm_prop.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "trace.h"
#include "qom/object.h"
#define DEBUG_SPAPR 0

typedef struct SpaprTpmState SpaprTpmState;
DECLARE_INSTANCE_CHECKER(SpaprTpmState, VIO_SPAPR_VTPM,
                         TYPE_TPM_SPAPR)
typedef struct TpmCrq {
    uint8_t valid;  /* 0x80: cmd; 0xc0: init crq */
                    /* 0x81-0x83: CRQ message response */
    uint8_t msg;    /* see below */
    uint16_t len;   /* len of TPM request; len of TPM response */
    uint32_t data;  /* rtce_dma_handle when sending TPM request */
    uint64_t reserved;
} TpmCrq;
#define SPAPR_VTPM_VALID_INIT_CRQ_COMMAND    0xC0
#define SPAPR_VTPM_VALID_COMMAND             0x80
#define SPAPR_VTPM_MSG_RESULT                0x80

/* msg types for valid = SPAPR_VTPM_VALID_INIT_CRQ */
#define SPAPR_VTPM_INIT_CRQ_RESULT           0x1
#define SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT  0x2

/* msg types for valid = SPAPR_VTPM_VALID_CMD */
#define SPAPR_VTPM_GET_VERSION               0x1
#define SPAPR_VTPM_TPM_COMMAND               0x2
#define SPAPR_VTPM_GET_RTCE_BUFFER_SIZE      0x3
#define SPAPR_VTPM_PREPARE_TO_SUSPEND        0x4

/* response error messages */
#define SPAPR_VTPM_VTPM_ERROR                0xff

/* error codes */
#define SPAPR_VTPM_ERR_COPY_IN_FAILED        0x3
#define SPAPR_VTPM_ERR_COPY_OUT_FAILED       0x4

#define TPM_SPAPR_BUFFER_MAX                 4096
struct SpaprTpmState {
    SpaprVioDevice vdev;

    TpmCrq crq; /* track single TPM command */

    uint8_t state;
#define SPAPR_VTPM_STATE_NONE         0
#define SPAPR_VTPM_STATE_EXECUTION    1
#define SPAPR_VTPM_STATE_COMPLETION   2

    unsigned char *buffer;

    uint32_t numbytes; /* number of bytes to deliver on resume */

    TPMBackendCmd cmd;

    TPMBackend *be_driver;
    TPMVersion be_tpm_version;

    size_t be_buffer_size;
};
/*
 * Send a request to the TPM.
 */
static void tpm_spapr_tpm_send(SpaprTpmState *s)
{
    if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
        tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");
    }

    s->state = SPAPR_VTPM_STATE_EXECUTION;
    s->cmd = (TPMBackendCmd) {
        .locty = 0,
        .in = s->buffer,
        .in_len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size),
        .out = s->buffer,
        .out_len = s->be_buffer_size,
    };

    tpm_backend_deliver_request(s->be_driver, &s->cmd);
}
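
/*
 * DMA-read a TPM request from guest memory at dataptr into the local
 * buffer and hand it to the TPM backend for processing.
 */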
static int tpm_spapr_process_cmd(SpaprTpmState *s, uint64_t dataptr)
{
    long rc;

    /* a max. of be_buffer_size bytes can be transported */
    rc = spapr_vio_dma_read(&s->vdev, dataptr,
                            s->buffer, s->be_buffer_size);
    if (rc) {
        error_report("tpm_spapr_got_payload: DMA read failure");
    }
    /* let vTPM handle any malformed request */
    tpm_spapr_tpm_send(s);

    return rc;
}
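
/* Post a CRQ on the device's command/response queue. */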
static inline int spapr_tpm_send_crq(struct SpaprVioDevice *dev, TpmCrq *crq)
{
    return spapr_vio_send_crq(dev, (uint8_t *)crq);
}
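
/*
 * Handle a CRQ received from the guest: answer the CRQ initialization
 * handshake and dispatch command CRQs (TPM command, get version,
 * get RTCE buffer size, prepare to suspend).
 */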
static int tpm_spapr_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);
    TpmCrq local_crq;
    TpmCrq *crq = &s->crq; /* requests only */
    int rc;
    uint8_t valid = crq_data[0];
    uint8_t msg = crq_data[1];

    trace_tpm_spapr_do_crq(valid, msg);

    switch (valid) {
    case SPAPR_VTPM_VALID_INIT_CRQ_COMMAND: /* Init command/response */

        /* Respond to initialization request */
        switch (msg) {
        case SPAPR_VTPM_INIT_CRQ_RESULT:
            trace_tpm_spapr_do_crq_crq_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT:
            trace_tpm_spapr_do_crq_crq_complete_result();
            memset(&local_crq, 0, sizeof(local_crq));
            local_crq.valid = SPAPR_VTPM_VALID_INIT_CRQ_COMMAND;
            local_crq.msg = SPAPR_VTPM_INIT_CRQ_COMPLETE_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;
        }

        break;
    case SPAPR_VTPM_VALID_COMMAND: /* Payloads */
        switch (msg) {
        case SPAPR_VTPM_TPM_COMMAND:
            trace_tpm_spapr_do_crq_tpm_command();
            if (s->state == SPAPR_VTPM_STATE_EXECUTION) {
                return H_BUSY;
            }
            memcpy(crq, crq_data, sizeof(*crq));

            rc = tpm_spapr_process_cmd(s, be32_to_cpu(crq->data));

            if (rc == H_SUCCESS) {
                crq->valid = be16_to_cpu(0);
            } else {
                local_crq.valid = SPAPR_VTPM_MSG_RESULT;
                local_crq.msg = SPAPR_VTPM_VTPM_ERROR;
                local_crq.len = cpu_to_be16(0);
                local_crq.data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_IN_FAILED);
                spapr_tpm_send_crq(dev, &local_crq);
            }
            break;

        case SPAPR_VTPM_GET_RTCE_BUFFER_SIZE:
            trace_tpm_spapr_do_crq_tpm_get_rtce_buffer_size(s->be_buffer_size);
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_RTCE_BUFFER_SIZE |
                            SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(s->be_buffer_size);
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_GET_VERSION:
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_GET_VERSION | SPAPR_VTPM_MSG_RESULT;
            local_crq.len = cpu_to_be16(0);
            switch (s->be_tpm_version) {
            case TPM_VERSION_1_2:
                local_crq.data = cpu_to_be32(1);
                break;
            case TPM_VERSION_2_0:
                local_crq.data = cpu_to_be32(2);
                break;
            default:
                g_assert_not_reached();
                break;
            }
            trace_tpm_spapr_do_crq_get_version(be32_to_cpu(local_crq.data));
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        case SPAPR_VTPM_PREPARE_TO_SUSPEND:
            trace_tpm_spapr_do_crq_prepare_to_suspend();
            local_crq.valid = SPAPR_VTPM_VALID_COMMAND;
            local_crq.msg = SPAPR_VTPM_PREPARE_TO_SUSPEND |
                            SPAPR_VTPM_MSG_RESULT;
            spapr_tpm_send_crq(dev, &local_crq);
            break;

        default:
            trace_tpm_spapr_do_crq_unknown_msg_type(crq->msg);
        }
        break;
    default:
        trace_tpm_spapr_do_crq_unknown_crq(valid, msg);
    }

    return H_SUCCESS;
}
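
/*
 * Callback invoked by the TPM backend once a command has completed:
 * DMA-write the response back to the guest and post a response CRQ.
 * During the final phase of migration the delivery is deferred until
 * .post_load on the destination.
 */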
static void tpm_spapr_request_completed(TPMIf *ti, int ret)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);
    TpmCrq *crq = &s->crq;
    uint32_t len;
    int rc;

    s->state = SPAPR_VTPM_STATE_COMPLETION;

    /* a max. of be_buffer_size bytes can be transported */
    len = MIN(tpm_cmd_get_size(s->buffer), s->be_buffer_size);

    if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
        trace_tpm_spapr_caught_response(len);
        /* defer delivery of response until .post_load */
        s->numbytes = len;
        return;
    }

    rc = spapr_vio_dma_write(&s->vdev, be32_to_cpu(crq->data),
                             s->buffer, len);

    if (trace_event_get_state_backends(TRACE_TPM_SPAPR_SHOW_BUFFER)) {
        tpm_util_show_buffer(s->buffer, len, "From TPM");
    }

    crq->valid = SPAPR_VTPM_MSG_RESULT;
    if (rc == H_SUCCESS) {
        crq->msg = SPAPR_VTPM_TPM_COMMAND | SPAPR_VTPM_MSG_RESULT;
        crq->len = cpu_to_be16(len);
    } else {
        error_report("%s: DMA write failure", __func__);
        crq->msg = SPAPR_VTPM_VTPM_ERROR;
        crq->len = cpu_to_be16(0);
        crq->data = cpu_to_be32(SPAPR_VTPM_ERR_COPY_OUT_FAILED);
    }

    rc = spapr_tpm_send_crq(&s->vdev, crq);
    if (rc) {
        error_report("%s: Error sending response", __func__);
    }
}
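
/* Ask the backend to start up the TPM with the negotiated buffer size. */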
static int tpm_spapr_do_startup_tpm(SpaprTpmState *s, size_t buffersize)
{
    return tpm_backend_startup_tpm(s->be_driver, buffersize);
}
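
/*
 * Return the device tree "compatible" string matching the backend's
 * TPM version (TPM 1.2 or TPM 2.0).
 */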
static const char *tpm_spapr_get_dt_compatible(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    switch (s->be_tpm_version) {
    case TPM_VERSION_1_2:
        return "IBM,vtpm";
    case TPM_VERSION_2_0:
        return "IBM,vtpm20";
    default:
        g_assert_not_reached();
    }
}
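
/*
 * Device reset: clear the command state, re-query the backend's TPM
 * version and buffer size, reset the backend and start up the TPM.
 */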
static void tpm_spapr_reset(SpaprVioDevice *dev)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    s->state = SPAPR_VTPM_STATE_NONE;
    s->numbytes = 0;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);

    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_SPAPR_BUFFER_MAX);

    tpm_backend_reset(s->be_driver);

    if (tpm_spapr_do_startup_tpm(s, s->be_buffer_size) < 0) {
        exit(1);
    }
}
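
/* Report the backend's TPM version, or TPM_VERSION_UNSPEC after a startup error. */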
static enum TPMVersion tpm_spapr_get_version(TPMIf *ti)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(ti);

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return TPM_VERSION_UNSPEC;
    }

    return tpm_backend_get_tpm_version(s->be_driver);
}
/* persistent state handling */

static int tpm_spapr_pre_save(void *opaque)
{
    SpaprTpmState *s = opaque;

    tpm_backend_finish_sync(s->be_driver);
    /*
     * we cannot deliver the results to the VM since DMA would touch VM
     * memory
     */

    return 0;
}
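
/*
 * After migration, deliver a response that was caught in flight on the
 * source side (see tpm_spapr_request_completed).
 */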
static int tpm_spapr_post_load(void *opaque, int version_id)
{
    SpaprTpmState *s = opaque;

    if (s->numbytes) {
        trace_tpm_spapr_post_load();
        /* deliver the results to the VM via DMA */
        tpm_spapr_request_completed(TPM_IF(s), 0);
        s->numbytes = 0;
    }

    return 0;
}
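
/* Migration state: VIO device state plus any response deferred for delivery. */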
static const VMStateDescription vmstate_spapr_vtpm = {
    .name = "tpm-spapr",
    .pre_save = tpm_spapr_pre_save,
    .post_load = tpm_spapr_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, SpaprTpmState),

        VMSTATE_UINT8(state, SpaprTpmState),
        VMSTATE_UINT32(numbytes, SpaprTpmState),
        VMSTATE_VBUFFER_UINT32(buffer, SpaprTpmState, 0, NULL, numbytes),
        /* remember DMA address */
        VMSTATE_UINT32(crq.data, SpaprTpmState),
        VMSTATE_END_OF_LIST(),
    }
};
static Property tpm_spapr_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprTpmState, vdev),
    DEFINE_PROP_TPMBE("tpmdev", SpaprTpmState, be_driver),
    DEFINE_PROP_END_OF_LIST(),
};
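
/*
 * Realize: enforce the single-TPM-device limit, require a "tpmdev"
 * backend and allocate the request/response buffer.
 */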
static void tpm_spapr_realizefn(SpaprVioDevice *dev, Error **errp)
{
    SpaprTpmState *s = VIO_SPAPR_VTPM(dev);

    if (!tpm_find()) {
        error_setg(errp, "at most one TPM device is permitted");
        return;
    }

    dev->crq.SendFunc = tpm_spapr_do_crq;

    if (!s->be_driver) {
        error_setg(errp, "'tpmdev' property is required");
        return;
    }
    s->buffer = g_malloc(TPM_SPAPR_BUFFER_MAX);
}
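
/* Wire up the sPAPR VIO and TPM interface callbacks for this device class. */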
static void tpm_spapr_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);
    TPMIfClass *tc = TPM_IF_CLASS(klass);

    k->realize = tpm_spapr_realizefn;
    k->reset = tpm_spapr_reset;
    k->dt_name = "vtpm";
    k->dt_type = "IBM,vtpm";
    k->get_dt_compatible = tpm_spapr_get_dt_compatible;
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, tpm_spapr_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vtpm;

    tc->model = TPM_MODEL_TPM_SPAPR;
    tc->get_version = tpm_spapr_get_version;
    tc->request_completed = tpm_spapr_request_completed;
}
static const TypeInfo tpm_spapr_info = {
    .name          = TYPE_TPM_SPAPR,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprTpmState),
    .class_init    = tpm_spapr_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_TPM_IF },
        { }
    }
};

static void tpm_spapr_register_types(void)
{
    type_register_static(&tpm_spapr_info);
}

type_init(tpm_spapr_register_types)