/*
 * tpm_tis_common.c - QEMU's TPM TIS interface emulator
 * device agnostic functions
 *
 * Copyright (C) 2006,2010-2013 IBM Corporation
 *
 * Authors:
 *  Stefan Berger <stefanb@us.ibm.com>
 *  David Safford <safford@us.ibm.com>
 *
 * Xen 4 support: Andreas Niederl <andreas.niederl@iaik.tugraz.at>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * Implementation of the TIS interface according to specs found at
 * http://www.trustedcomputinggroup.org. This implementation currently
 * supports version 1.3, 21 March 2013
 * In the developers menu choose the PC Client section then find the TIS
 * specification.
 *
 * TPM TIS for TPM 2 implementation following TCG PC Client Platform
 * TPM Profile (PTP) Specification, Family 2.0, Revision 00.43
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/isa/isa.h"
#include "qapi/error.h"
#include "qemu/module.h"

#include "hw/acpi/tpm.h"
#include "hw/pci/pci_ids.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/tpm_backend.h"
#include "sysemu/tpm_util.h"
#include "tpm_ppi.h"
#include "trace.h"

#include "tpm_tis.h"

#define DEBUG_TIS 0
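
/*
 * DEBUG_TIS guards the verbose state dump: tpm_tis_dump_state() and the
 * debug register read path are compiled behind it, and tpm_tis_pre_save()
 * only calls the dump when DEBUG_TIS is non-zero.
 */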

/* local prototypes */

static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr,
                                  unsigned size);

/* utility functions */
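
/*
 * Each locality has its own 4 KiB page of registers in the TIS MMIO range,
 * so the locality number is taken from the address bits above
 * TPM_TIS_LOCALITY_SHIFT.
 */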
static uint8_t tpm_tis_locality_from_addr(hwaddr addr)
{
    return (uint8_t)((addr >> TPM_TIS_LOCALITY_SHIFT) & 0x7);
}

/*
 * Set the given flags in the STS register by clearing the register but
 * preserving the SELFTEST_DONE and TPM_FAMILY_MASK flags and then setting
 * the new flags.
 *
 * The SELFTEST_DONE flag is acquired from the backend that determines it by
 * peeking into TPM commands.
 *
 * A VM suspend/resume will preserve the flag by storing it into the VM
 * device state, but the backend will not remember it when QEMU is started
 * again. Therefore, we cache the flag here. Once set, it will not be unset
 * except by a reset.
 */
static void tpm_tis_sts_set(TPMLocality *l, uint32_t flags)
{
    l->sts &= TPM_TIS_STS_SELFTEST_DONE | TPM_TIS_STS_TPM_FAMILY_MASK;
    l->sts |= flags;
}

/*
 * Send a request to the TPM.
 */
static void tpm_tis_tpm_send(TPMState *s, uint8_t locty)
{
    tpm_util_show_buffer(s->buffer, s->be_buffer_size, "To TPM");

    /*
     * rw_offset serves as length indicator for length of data;
     * it's reset when the response comes back
     */
    s->loc[locty].state = TPM_TIS_STATE_EXECUTION;

    s->cmd = (TPMBackendCmd) {
        .locty = locty,
        .in = s->buffer,
        .in_len = s->rw_offset,
        .out = s->buffer,
        .out_len = s->be_buffer_size,
    };

    tpm_backend_deliver_request(s->be_driver, &s->cmd);
}

/* raise an interrupt if allowed */
static void tpm_tis_raise_irq(TPMState *s, uint8_t locty, uint32_t irqmask)
{
    if (!TPM_TIS_IS_VALID_LOCTY(locty)) {
        return;
    }

    if ((s->loc[locty].inte & TPM_TIS_INT_ENABLED) &&
        (s->loc[locty].inte & irqmask)) {
        trace_tpm_tis_raise_irq(irqmask);
        qemu_irq_raise(s->irq);
        s->loc[locty].ints |= irqmask;
    }
}
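
/*
 * Returns non-zero if any locality other than 'locty' has a pending
 * REQUEST_USE; used to compute the PENDING_REQUEST bit reported in the
 * ACCESS register.
 */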
static uint32_t tpm_tis_check_request_use_except(TPMState *s, uint8_t locty)
{
    uint8_t l;

    for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
        if (l == locty) {
            continue;
        }
        if ((s->loc[l].access & TPM_TIS_ACCESS_REQUEST_USE)) {
            return 1;
        }
    }

    return 0;
}
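
/*
 * Hand the active-locality flag over to 'new_active_locty': clear the flags
 * on the previously active locality (marking it BEEN_SEIZED if it lost the
 * locality to a seize), set them on the new one, and raise the
 * LOCALITY_CHANGED interrupt if the active locality actually changed.
 */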
static void tpm_tis_new_active_locality(TPMState *s, uint8_t new_active_locty)
{
    bool change = (s->active_locty != new_active_locty);
    bool is_seize;
    uint8_t mask;

    if (change && TPM_TIS_IS_VALID_LOCTY(s->active_locty)) {
        is_seize = TPM_TIS_IS_VALID_LOCTY(new_active_locty) &&
                   s->loc[new_active_locty].access & TPM_TIS_ACCESS_SEIZE;

        if (is_seize) {
            mask = ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY);
        } else {
            mask = ~(TPM_TIS_ACCESS_ACTIVE_LOCALITY|
                     TPM_TIS_ACCESS_REQUEST_USE);
        }
        /* reset flags on the old active locality */
        s->loc[s->active_locty].access &= mask;

        if (is_seize) {
            s->loc[s->active_locty].access |= TPM_TIS_ACCESS_BEEN_SEIZED;
        }
    }

    s->active_locty = new_active_locty;

    trace_tpm_tis_new_active_locality(s->active_locty);

    if (TPM_TIS_IS_VALID_LOCTY(new_active_locty)) {
        /* set flags on the new active locality */
        s->loc[new_active_locty].access |= TPM_TIS_ACCESS_ACTIVE_LOCALITY;
        s->loc[new_active_locty].access &= ~(TPM_TIS_ACCESS_REQUEST_USE |
                                             TPM_TIS_ACCESS_SEIZE);
    }

    if (change) {
        tpm_tis_raise_irq(s, s->active_locty, TPM_TIS_INT_LOCALITY_CHANGED);
    }
}

/* abort -- this function switches the locality */
static void tpm_tis_abort(TPMState *s)
{
    s->rw_offset = 0;

    trace_tpm_tis_abort(s->next_locty);

    /*
     * Need to react differently depending on who's aborting now and
     * which locality will become active afterwards.
     */
    if (s->aborting_locty == s->next_locty) {
        s->loc[s->aborting_locty].state = TPM_TIS_STATE_READY;
        tpm_tis_sts_set(&s->loc[s->aborting_locty],
                        TPM_TIS_STS_COMMAND_READY);
        tpm_tis_raise_irq(s, s->aborting_locty, TPM_TIS_INT_COMMAND_READY);
    }

    /* the locality active after the abort may differ from the current one */
    tpm_tis_new_active_locality(s, s->next_locty);

    s->next_locty = TPM_TIS_NO_LOCALITY;
    /* nobody's aborting a command anymore */
    s->aborting_locty = TPM_TIS_NO_LOCALITY;
}

/* prepare aborting current command */
static void tpm_tis_prep_abort(TPMState *s, uint8_t locty, uint8_t newlocty)
{
    uint8_t busy_locty;

    assert(TPM_TIS_IS_VALID_LOCTY(newlocty));

    s->aborting_locty = locty; /* may also be TPM_TIS_NO_LOCALITY */
    s->next_locty = newlocty;  /* locality after successful abort */

    /*
     * only abort a command using an interrupt if currently executing
     * a command AND if there's a valid connection to the vTPM.
     */
    for (busy_locty = 0; busy_locty < TPM_TIS_NUM_LOCALITIES; busy_locty++) {
        if (s->loc[busy_locty].state == TPM_TIS_STATE_EXECUTION) {
            /*
             * request the backend to cancel. Some backends may not
             * support it
             */
            tpm_backend_cancel_cmd(s->be_driver);
            return;
        }
    }

    tpm_tis_abort(s);
}

/*
 * Callback from the TPM to indicate that the response was received.
 */
void tpm_tis_request_completed(TPMState *s, int ret)
{
    uint8_t locty = s->cmd.locty;
    uint8_t l;

    assert(TPM_TIS_IS_VALID_LOCTY(locty));

    if (s->cmd.selftest_done) {
        for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
            s->loc[l].sts |= TPM_TIS_STS_SELFTEST_DONE;
        }
    }

    /* FIXME: report error if ret != 0 */
    tpm_tis_sts_set(&s->loc[locty],
                    TPM_TIS_STS_VALID | TPM_TIS_STS_DATA_AVAILABLE);
    s->loc[locty].state = TPM_TIS_STATE_COMPLETION;
    s->rw_offset = 0;

    tpm_util_show_buffer(s->buffer, s->be_buffer_size, "From TPM");

    if (TPM_TIS_IS_VALID_LOCTY(s->next_locty)) {
        tpm_tis_abort(s);
    }

    tpm_tis_raise_irq(s, locty,
                      TPM_TIS_INT_DATA_AVAILABLE | TPM_TIS_INT_STS_VALID);
}

/*
 * Read a byte of response data
 */
static uint32_t tpm_tis_data_read(TPMState *s, uint8_t locty)
{
    uint32_t ret = TPM_TIS_NO_DATA_BYTE;
    uint16_t len;

    if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
        len = MIN(tpm_cmd_get_size(&s->buffer),
                  s->be_buffer_size);

        ret = s->buffer[s->rw_offset++];
        if (s->rw_offset >= len) {
            /* got last byte */
            tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID);
            tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
        }
        trace_tpm_tis_data_read(ret, s->rw_offset - 1);
    }

    return ret;
}

#ifdef DEBUG_TIS
static void tpm_tis_dump_state(TPMState *s, hwaddr addr)
{
    static const unsigned regs[] = {
        TPM_TIS_REG_ACCESS,
        TPM_TIS_REG_INT_ENABLE,
        TPM_TIS_REG_INT_VECTOR,
        TPM_TIS_REG_INT_STATUS,
        TPM_TIS_REG_INTF_CAPABILITY,
        TPM_TIS_REG_STS,
        TPM_TIS_REG_DID_VID,
        TPM_TIS_REG_RID,
        0xfff};
    int idx;
    uint8_t locty = tpm_tis_locality_from_addr(addr);
    hwaddr base = addr & ~0xfff;

    printf("tpm_tis: active locality : %d\n"
           "tpm_tis: state of locality %d : %d\n"
           "tpm_tis: register dump:\n",
           s->active_locty,
           locty, s->loc[locty].state);

    for (idx = 0; regs[idx] != 0xfff; idx++) {
        printf("tpm_tis: 0x%04x : 0x%08x\n", regs[idx],
               (int)tpm_tis_mmio_read(s, base + regs[idx], 4));
    }

    printf("tpm_tis: r/w offset : %d\n"
           "tpm_tis: result buffer : ",
           s->rw_offset);
    for (idx = 0;
         idx < MIN(tpm_cmd_get_size(&s->buffer), s->be_buffer_size);
         idx++) {
        printf("%c%02x%s",
               s->rw_offset == idx ? '>' : ' ',
               s->buffer[idx],
               ((idx & 0xf) == 0xf) ? "\ntpm_tis: " : "");
    }
    printf("\n");
}
#endif

/*
 * Read a register of the TIS interface
 * See specs pages 33-63 for description of the registers
 */
static uint64_t tpm_tis_mmio_read(void *opaque, hwaddr addr,
                                  unsigned size)
{
    TPMState *s = opaque;
    uint16_t offset = addr & 0xffc;
    uint8_t shift = (addr & 0x3) * 8;
    uint32_t val = 0xffffffff;
    uint8_t locty = tpm_tis_locality_from_addr(addr);
    uint32_t avail;
    uint8_t v;

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return 0;
    }

    switch (offset) {
    case TPM_TIS_REG_ACCESS:
        /* never show the SEIZE flag even though we use it internally */
        val = s->loc[locty].access & ~TPM_TIS_ACCESS_SEIZE;
        /* the pending flag is always calculated */
        if (tpm_tis_check_request_use_except(s, locty)) {
            val |= TPM_TIS_ACCESS_PENDING_REQUEST;
        }
        val |= !tpm_backend_get_tpm_established_flag(s->be_driver);
        break;
    case TPM_TIS_REG_INT_ENABLE:
        val = s->loc[locty].inte;
        break;
    case TPM_TIS_REG_INT_VECTOR:
        val = s->irq_num;
        break;
    case TPM_TIS_REG_INT_STATUS:
        val = s->loc[locty].ints;
        break;
    case TPM_TIS_REG_INTF_CAPABILITY:
        switch (s->be_tpm_version) {
        case TPM_VERSION_UNSPEC:
            val = 0;
            break;
        case TPM_VERSION_1_2:
            val = TPM_TIS_CAPABILITIES_SUPPORTED1_3;
            break;
        case TPM_VERSION_2_0:
            val = TPM_TIS_CAPABILITIES_SUPPORTED2_0;
            break;
        }
        break;
    case TPM_TIS_REG_STS:
        if (s->active_locty == locty) {
            if ((s->loc[locty].sts & TPM_TIS_STS_DATA_AVAILABLE)) {
                val = TPM_TIS_BURST_COUNT(
                      MIN(tpm_cmd_get_size(&s->buffer),
                          s->be_buffer_size)
                      - s->rw_offset) | s->loc[locty].sts;
            } else {
                avail = s->be_buffer_size - s->rw_offset;
                /*
                 * byte-sized reads should not return 0x00 for 0x100
                 * available bytes.
                 */
                if (size == 1 && avail > 0xff) {
                    avail = 0xff;
                }
                val = TPM_TIS_BURST_COUNT(avail) | s->loc[locty].sts;
            }
        }
        break;
    case TPM_TIS_REG_DATA_FIFO:
    case TPM_TIS_REG_DATA_XFIFO ... TPM_TIS_REG_DATA_XFIFO_END:
        if (s->active_locty == locty) {
            if (size > 4 - (addr & 0x3)) {
                /* prevent access beyond FIFO */
                size = 4 - (addr & 0x3);
            }
            val = 0;
            shift = 0;
            while (size > 0) {
                switch (s->loc[locty].state) {
                case TPM_TIS_STATE_COMPLETION:
                    v = tpm_tis_data_read(s, locty);
                    break;
                default:
                    v = TPM_TIS_NO_DATA_BYTE;
                    break;
                }
                val |= (v << shift);
                shift += 8;
                size--;
            }
            shift = 0; /* no more adjustments */
        }
        break;
    case TPM_TIS_REG_INTERFACE_ID:
        val = s->loc[locty].iface_id;
        break;
    case TPM_TIS_REG_DID_VID:
        val = (TPM_TIS_TPM_DID << 16) | TPM_TIS_TPM_VID;
        break;
    case TPM_TIS_REG_RID:
        val = TPM_TIS_TPM_RID;
        break;
#ifdef DEBUG_TIS
    case TPM_TIS_REG_DEBUG:
        tpm_tis_dump_state(s, addr);
        break;
#endif
    }

    if (shift) {
        val >>= shift;
    }

    trace_tpm_tis_mmio_read(size, addr, val);

    return val;
}
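
/*
 * Rough summary of how a guest driver exercises the state machine below
 * (simplified; see the PTP/TIS specification for the authoritative rules):
 *
 *   1. claim a locality via the ACCESS register (REQUEST_USE)
 *   2. write COMMAND_READY to the STS register
 *   3. write the command bytes into the DATA FIFO
 *   4. write TPM_GO to the STS register to start execution
 *   5. wait for DATA_AVAILABLE (polling STS or via interrupt)
 *   6. read the response back from the DATA FIFO
 */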
/*
 * Write a value to a register of the TIS interface
 * See specs pages 33-63 for description of the registers
 */
static void tpm_tis_mmio_write(void *opaque, hwaddr addr,
                               uint64_t val, unsigned size)
{
    TPMState *s = opaque;
    uint16_t off = addr & 0xffc;
    uint8_t shift = (addr & 0x3) * 8;
    uint8_t locty = tpm_tis_locality_from_addr(addr);
    uint8_t active_locty, l;
    int c, set_new_locty = 1;
    uint16_t len;
    uint32_t mask = (size == 1) ? 0xff : ((size == 2) ? 0xffff : ~0);

    trace_tpm_tis_mmio_write(size, addr, val);

    if (locty == 4) {
        trace_tpm_tis_mmio_write_locty4();
        return;
    }

    if (tpm_backend_had_startup_error(s->be_driver)) {
        return;
    }

    val &= mask;

    if (shift) {
        val <<= shift;
        mask <<= shift;
    }

    mask ^= 0xffffffff;
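
    /*
     * 'mask' now covers the bits that are *not* addressed by this write; it
     * is used below to preserve those bits on read-modify-write updates of
     * the interrupt enable register.
     */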
    switch (off) {
    case TPM_TIS_REG_ACCESS:

        if ((val & TPM_TIS_ACCESS_SEIZE)) {
            val &= ~(TPM_TIS_ACCESS_REQUEST_USE |
                     TPM_TIS_ACCESS_ACTIVE_LOCALITY);
        }

        active_locty = s->active_locty;

        if ((val & TPM_TIS_ACCESS_ACTIVE_LOCALITY)) {
            /* give up locality if currently owned */
            if (s->active_locty == locty) {
                trace_tpm_tis_mmio_write_release_locty(locty);

                uint8_t newlocty = TPM_TIS_NO_LOCALITY;
                /* anybody wants the locality ? */
                for (c = TPM_TIS_NUM_LOCALITIES - 1; c >= 0; c--) {
                    if ((s->loc[c].access & TPM_TIS_ACCESS_REQUEST_USE)) {
                        trace_tpm_tis_mmio_write_locty_req_use(c);
                        newlocty = c;
                        break;
                    }
                }
                trace_tpm_tis_mmio_write_next_locty(newlocty);

                if (TPM_TIS_IS_VALID_LOCTY(newlocty)) {
                    set_new_locty = 0;
                    tpm_tis_prep_abort(s, locty, newlocty);
                } else {
                    active_locty = TPM_TIS_NO_LOCALITY;
                }
            } else {
                /* not currently the owner; clear a pending request */
                s->loc[locty].access &= ~TPM_TIS_ACCESS_REQUEST_USE;
            }
        }

        if ((val & TPM_TIS_ACCESS_BEEN_SEIZED)) {
            s->loc[locty].access &= ~TPM_TIS_ACCESS_BEEN_SEIZED;
        }

        if ((val & TPM_TIS_ACCESS_SEIZE)) {
            /*
             * allow seize if a locality is active and the requesting
             * locality is higher than the one that's active
             * OR
             * allow seize for requesting locality if no locality is
             * active
             */
            while ((TPM_TIS_IS_VALID_LOCTY(s->active_locty) &&
                    locty > s->active_locty) ||
                    !TPM_TIS_IS_VALID_LOCTY(s->active_locty)) {
                bool higher_seize = false;

                /* already a pending SEIZE ? */
                if ((s->loc[locty].access & TPM_TIS_ACCESS_SEIZE)) {
                    break;
                }

                /* check for ongoing seize by a higher locality */
                for (l = locty + 1; l < TPM_TIS_NUM_LOCALITIES; l++) {
                    if ((s->loc[l].access & TPM_TIS_ACCESS_SEIZE)) {
                        higher_seize = true;
                        break;
                    }
                }

                if (higher_seize) {
                    break;
                }

                /* cancel any seize by a lower locality */
                for (l = 0; l < locty; l++) {
                    s->loc[l].access &= ~TPM_TIS_ACCESS_SEIZE;
                }

                s->loc[locty].access |= TPM_TIS_ACCESS_SEIZE;

                trace_tpm_tis_mmio_write_locty_seized(locty, s->active_locty);
                trace_tpm_tis_mmio_write_init_abort();

                set_new_locty = 0;
                tpm_tis_prep_abort(s, s->active_locty, locty);
                break;
            }
        }

        if ((val & TPM_TIS_ACCESS_REQUEST_USE)) {
            if (s->active_locty != locty) {
                if (TPM_TIS_IS_VALID_LOCTY(s->active_locty)) {
                    s->loc[locty].access |= TPM_TIS_ACCESS_REQUEST_USE;
                } else {
                    /* no locality active -> make this one active now */
                    active_locty = locty;
                }
            }
        }

        if (set_new_locty) {
            tpm_tis_new_active_locality(s, active_locty);
        }

        break;
    case TPM_TIS_REG_INT_ENABLE:
        if (s->active_locty != locty) {
            break;
        }

        s->loc[locty].inte &= mask;
        s->loc[locty].inte |= (val & (TPM_TIS_INT_ENABLED |
                                      TPM_TIS_INT_POLARITY_MASK |
                                      TPM_TIS_INTERRUPTS_SUPPORTED));
        break;
    case TPM_TIS_REG_INT_VECTOR:
        /* hard wired -- ignore */
        break;
    case TPM_TIS_REG_INT_STATUS:
        if (s->active_locty != locty) {
            break;
        }

        /* clearing of interrupt flags */
        if (((val & TPM_TIS_INTERRUPTS_SUPPORTED)) &&
            (s->loc[locty].ints & TPM_TIS_INTERRUPTS_SUPPORTED)) {
            s->loc[locty].ints &= ~val;
            if (s->loc[locty].ints == 0) {
                qemu_irq_lower(s->irq);
                trace_tpm_tis_mmio_write_lowering_irq();
            }
        }
        s->loc[locty].ints &= ~(val & TPM_TIS_INTERRUPTS_SUPPORTED);
        break;
    case TPM_TIS_REG_STS:
        if (s->active_locty != locty) {
            break;
        }

        if (s->be_tpm_version == TPM_VERSION_2_0) {
            /* some flags that are only supported for TPM 2 */
            if (val & TPM_TIS_STS_COMMAND_CANCEL) {
                if (s->loc[locty].state == TPM_TIS_STATE_EXECUTION) {
                    /*
                     * request the backend to cancel. Some backends may not
                     * support it
                     */
                    tpm_backend_cancel_cmd(s->be_driver);
                }
            }

            if (val & TPM_TIS_STS_RESET_ESTABLISHMENT_BIT) {
                if (locty == 3 || locty == 4) {
                    tpm_backend_reset_tpm_established_flag(s->be_driver, locty);
                }
            }
        }

        val &= (TPM_TIS_STS_COMMAND_READY | TPM_TIS_STS_TPM_GO |
                TPM_TIS_STS_RESPONSE_RETRY);

        if (val == TPM_TIS_STS_COMMAND_READY) {
            switch (s->loc[locty].state) {

            case TPM_TIS_STATE_READY:
                s->rw_offset = 0;
                break;

            case TPM_TIS_STATE_IDLE:
                tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_COMMAND_READY);
                s->loc[locty].state = TPM_TIS_STATE_READY;
                tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
                break;

            case TPM_TIS_STATE_EXECUTION:
            case TPM_TIS_STATE_RECEPTION:
                /* abort currently running command */
                trace_tpm_tis_mmio_write_init_abort();
                tpm_tis_prep_abort(s, locty, locty);
                break;

            case TPM_TIS_STATE_COMPLETION:
                s->rw_offset = 0;
                /* shortcut to ready state with C/R set */
                s->loc[locty].state = TPM_TIS_STATE_READY;
                if (!(s->loc[locty].sts & TPM_TIS_STS_COMMAND_READY)) {
                    tpm_tis_sts_set(&s->loc[locty],
                                    TPM_TIS_STS_COMMAND_READY);
                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_COMMAND_READY);
                }
                s->loc[locty].sts &= ~(TPM_TIS_STS_DATA_AVAILABLE);
                break;

            }
        } else if (val == TPM_TIS_STS_TPM_GO) {
            switch (s->loc[locty].state) {
            case TPM_TIS_STATE_RECEPTION:
                if ((s->loc[locty].sts & TPM_TIS_STS_EXPECT) == 0) {
                    tpm_tis_tpm_send(s, locty);
                }
                break;
            default:
                /* ignore */
                break;
            }
        } else if (val == TPM_TIS_STS_RESPONSE_RETRY) {
            switch (s->loc[locty].state) {
            case TPM_TIS_STATE_COMPLETION:
                s->rw_offset = 0;
                tpm_tis_sts_set(&s->loc[locty],
                                TPM_TIS_STS_VALID|
                                TPM_TIS_STS_DATA_AVAILABLE);
                break;
            default:
                /* ignore */
                break;
            }
        }
        break;
    case TPM_TIS_REG_DATA_FIFO:
    case TPM_TIS_REG_DATA_XFIFO ... TPM_TIS_REG_DATA_XFIFO_END:
        /* data fifo */
        if (s->active_locty != locty) {
            break;
        }

        if (s->loc[locty].state == TPM_TIS_STATE_IDLE ||
            s->loc[locty].state == TPM_TIS_STATE_EXECUTION ||
            s->loc[locty].state == TPM_TIS_STATE_COMPLETION) {
            /* drop the byte */
        } else {
            trace_tpm_tis_mmio_write_data2send(val, size);
            if (s->loc[locty].state == TPM_TIS_STATE_READY) {
                s->loc[locty].state = TPM_TIS_STATE_RECEPTION;
                tpm_tis_sts_set(&s->loc[locty],
                                TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID);
            }

            val >>= shift;
            if (size > 4 - (addr & 0x3)) {
                /* prevent access beyond FIFO */
                size = 4 - (addr & 0x3);
            }

            while ((s->loc[locty].sts & TPM_TIS_STS_EXPECT) && size > 0) {
                if (s->rw_offset < s->be_buffer_size) {
                    s->buffer[s->rw_offset++] =
                        (uint8_t)val;
                    val >>= 8;
                    size--;
                } else {
                    tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID);
                }
            }

            /* check for complete packet */
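            /*
             * A TPM command starts with a 6 byte header (16-bit tag plus
             * 32-bit length), so the length field can only be evaluated
             * once more than 5 bytes have been received.
             */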
            if (s->rw_offset > 5 &&
                (s->loc[locty].sts & TPM_TIS_STS_EXPECT)) {
                /* we have a packet length - see if we have all of it */
                bool need_irq = !(s->loc[locty].sts & TPM_TIS_STS_VALID);

                len = tpm_cmd_get_size(&s->buffer);
                if (len > s->rw_offset) {
                    tpm_tis_sts_set(&s->loc[locty],
                                    TPM_TIS_STS_EXPECT | TPM_TIS_STS_VALID);
                } else {
                    /* packet complete */
                    tpm_tis_sts_set(&s->loc[locty], TPM_TIS_STS_VALID);
                }
                if (need_irq) {
                    tpm_tis_raise_irq(s, locty, TPM_TIS_INT_STS_VALID);
                }
            }
        }
        break;
    case TPM_TIS_REG_INTERFACE_ID:
        if (val & TPM_TIS_IFACE_ID_INT_SEL_LOCK) {
            for (l = 0; l < TPM_TIS_NUM_LOCALITIES; l++) {
                s->loc[l].iface_id |= TPM_TIS_IFACE_ID_INT_SEL_LOCK;
            }
        }
        break;
    }
}
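
/*
 * MMIO region ops for the TIS register window; accesses of 1 to 4 bytes are
 * accepted, and the read/write handlers above handle sub-register offsets
 * via the shift/mask logic.
 */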
const MemoryRegionOps tpm_tis_memory_ops = {
    .read = tpm_tis_mmio_read,
    .write = tpm_tis_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};

/*
 * Get the TPMVersion of the backend device being used
 */
enum TPMVersion tpm_tis_get_tpm_version(TPMState *s)
{
    if (tpm_backend_had_startup_error(s->be_driver)) {
        return TPM_VERSION_UNSPEC;
    }

    return tpm_backend_get_tpm_version(s->be_driver);
}

/*
 * This function is called when the machine starts, resets, or resumes
 * from S3.
 */
void tpm_tis_reset(TPMState *s)
{
    int c;

    s->be_tpm_version = tpm_backend_get_tpm_version(s->be_driver);
    s->be_buffer_size = MIN(tpm_backend_get_buffer_size(s->be_driver),
                            TPM_TIS_BUFFER_MAX);

    if (s->ppi_enabled) {
        tpm_ppi_reset(&s->ppi);
    }
    tpm_backend_reset(s->be_driver);

    s->active_locty = TPM_TIS_NO_LOCALITY;
    s->next_locty = TPM_TIS_NO_LOCALITY;
    s->aborting_locty = TPM_TIS_NO_LOCALITY;

    for (c = 0; c < TPM_TIS_NUM_LOCALITIES; c++) {
        s->loc[c].access = TPM_TIS_ACCESS_TPM_REG_VALID_STS;
        switch (s->be_tpm_version) {
        case TPM_VERSION_UNSPEC:
            break;
        case TPM_VERSION_1_2:
            s->loc[c].sts = TPM_TIS_STS_TPM_FAMILY1_2;
            s->loc[c].iface_id = TPM_TIS_IFACE_ID_SUPPORTED_FLAGS1_3;
            break;
        case TPM_VERSION_2_0:
            s->loc[c].sts = TPM_TIS_STS_TPM_FAMILY2_0;
            s->loc[c].iface_id = TPM_TIS_IFACE_ID_SUPPORTED_FLAGS2_0;
            break;
        }
        s->loc[c].inte = TPM_TIS_INT_POLARITY_LOW_LEVEL;
        s->loc[c].ints = 0;
        s->loc[c].state = TPM_TIS_STATE_IDLE;

        s->rw_offset = 0;
    }

    if (tpm_backend_startup_tpm(s->be_driver, s->be_buffer_size) < 0) {
        exit(1);
    }
}

/* persistent state handling */
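
/*
 * Called before the device state is migrated or snapshotted: wait for any
 * command still being processed by the backend so that the buffer contents
 * and per-locality flags captured by the vmstate description are consistent.
 */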
int tpm_tis_pre_save(TPMState *s)
{
    uint8_t locty = s->active_locty;

    trace_tpm_tis_pre_save(locty, s->rw_offset);

    if (DEBUG_TIS) {
        tpm_tis_dump_state(s, 0);
    }

    /*
     * Synchronize with backend completion.
     */
    tpm_backend_finish_sync(s->be_driver);

    return 0;
}
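
/*
 * Per-locality state that is saved and restored on migration: the locality's
 * state machine state, interrupt enable/status bits, ACCESS and STS flags,
 * and the interface id register.
 */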
const VMStateDescription vmstate_locty = {
    .name = "tpm-tis/locty",
    .version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(state, TPMLocality),
        VMSTATE_UINT32(inte, TPMLocality),
        VMSTATE_UINT32(ints, TPMLocality),
        VMSTATE_UINT8(access, TPMLocality),
        VMSTATE_UINT32(sts, TPMLocality),
        VMSTATE_UINT32(iface_id, TPMLocality),
        VMSTATE_END_OF_LIST(),