drivers/mrc_cache: Avoid `sizeof` on struct type
[coreboot.git] / src / drivers / mrc_cache / mrc_cache.c
blob841c97ea3df23b59ab2f26dcd6d95947401cf2bb
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <string.h>
4 #include <boot_device.h>
5 #include <bootstate.h>
6 #include <bootmode.h>
7 #include <console/console.h>
8 #include <cbmem.h>
9 #include <elog.h>
10 #include <fmap.h>
11 #include <ip_checksum.h>
12 #include <region_file.h>
13 #include <security/vboot/antirollback.h>
14 #include <security/vboot/mrc_cache_hash_tpm.h>
15 #include <security/vboot/vboot_common.h>
16 #include <spi_flash.h>
18 #include "mrc_cache.h"
/* FMAP area names for the various mrc_cache regions. */
#define DEFAULT_MRC_CACHE "RW_MRC_CACHE"
#define VARIABLE_MRC_CACHE "RW_VAR_MRC_CACHE"
#define RECOVERY_MRC_CACHE "RECOVERY_MRC_CACHE"
#define UNIFIED_MRC_CACHE "UNIFIED_MRC_CACHE"

/* 'MRCD' in little-endian byte order; marks a valid metadata header. */
#define MRC_DATA_SIGNATURE (('M'<<0)|('R'<<8)|('C'<<16)|('D'<<24))
/* On-flash header describing one saved mrc_cache entry. The entry data
 * (data_size bytes) immediately follows this packed header. */
struct mrc_metadata {
	uint32_t signature;	/* Must equal MRC_DATA_SIGNATURE. */
	uint32_t data_size;	/* Size of the data following the header. */
	uint16_t data_checksum;	/* IP checksum over the data portion. */
	/* IP checksum over this header computed with this field set to 0. */
	uint16_t header_checksum;
	uint32_t version;	/* Caller-supplied version for compatibility checks. */
} __packed;
/* Outcome of an mrc_cache update attempt; consumed by log_event_cache_update. */
enum result {
	UPDATE_FAILURE = -1,
	UPDATE_SUCCESS = 0,
	ALREADY_UPTODATE = 1
};
/* cache_region.flags bits: boot modes in which a region may be used. */
#define NORMAL_FLAG (1 << 0)
#define RECOVERY_FLAG (1 << 1)

/* Static description of one mrc_cache FMAP region. */
struct cache_region {
	const char *name;	/* FMAP area name. */
	uint32_t cbmem_id;	/* cbmem id used when stashing data to cbmem. */
	int type;		/* MRC_TRAINING_DATA or MRC_VARIABLE_DATA. */
	int elog_slot;		/* elog slot used for update events. */
	uint32_t tpm_hash_index; /* TPM NV index for the data hash; 0 = unused. */
	int flags;		/* NORMAL_FLAG and/or RECOVERY_FLAG. */
};
/* Training data selected while booting in recovery mode, provided the
 * platform has a dedicated recovery cache region. */
static const struct cache_region recovery_training = {
	.name = RECOVERY_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_RECOVERY,
	.tpm_hash_index = MRC_REC_HASH_NV_INDEX,
#if CONFIG(HAS_RECOVERY_MRC_CACHE)
	.flags = RECOVERY_FLAG,
#else
	/* No flags set: this entry never matches in lookup_region_type(). */
	.flags = 0,
#endif
};
/* Training data used on normal boots (default RW cache region). */
static const struct cache_region normal_training = {
	.name = DEFAULT_MRC_CACHE,
	.cbmem_id = CBMEM_ID_MRCDATA,
	.type = MRC_TRAINING_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_NORMAL,
	.tpm_hash_index = MRC_RW_HASH_NV_INDEX,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
/* Variable (non-training) MRC settings region; no TPM hash protection. */
static const struct cache_region variable_data = {
	.name = VARIABLE_MRC_CACHE,
	.cbmem_id = CBMEM_ID_VAR_MRCDATA,
	.type = MRC_VARIABLE_DATA,
	.elog_slot = ELOG_MEM_CACHE_UPDATE_SLOT_VARIABLE,
	.tpm_hash_index = 0,
#if CONFIG(VBOOT_STARTS_IN_ROMSTAGE)
	/*
	 * If VBOOT_STARTS_IN_ROMSTAGE is selected, this means that
	 * memory training happens before vboot (in RO) and the
	 * mrc_cache data is always safe to use.
	 */
	.flags = NORMAL_FLAG | RECOVERY_FLAG,
#else
	/*
	 * If !VBOOT_STARTS_IN_ROMSTAGE, this means that memory training happens after
	 * vboot (in RW code) and is never safe to use in recovery.
	 */
	.flags = NORMAL_FLAG,
#endif
};
/* Order matters here for priority in matching. */
static const struct cache_region *cache_regions[] = {
	&recovery_training,
	&normal_training,
	&variable_data,
};

/* TPM MRC hash functionality depends on vboot starting before memory init. */
_Static_assert(!CONFIG(MRC_SAVE_HASH_IN_TPM) ||
	       CONFIG(VBOOT_STARTS_IN_BOOTBLOCK),
	       "for TPM MRC hash functionality, vboot must start in bootblock");
/* Locate the FMAP area called 'name' and fill in 'r'. Returns 0 on
 * success, -1 if the area does not exist. */
static int lookup_region_by_name(const char *name, struct region *r)
{
	return (fmap_locate_area(name, r) == 0) ? 0 : -1;
}
129 static const struct cache_region *lookup_region_type(int type)
131 int i;
132 int flags;
134 if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled())
135 flags = RECOVERY_FLAG;
136 else
137 flags = NORMAL_FLAG;
139 for (i = 0; i < ARRAY_SIZE(cache_regions); i++) {
140 if (cache_regions[i]->type != type)
141 continue;
142 if ((cache_regions[i]->flags & flags) == flags)
143 return cache_regions[i];
146 return NULL;
149 static const struct cache_region *lookup_region(struct region *r, int type)
151 const struct cache_region *cr;
153 cr = lookup_region_type(type);
155 if (cr == NULL) {
156 printk(BIOS_ERR, "MRC: failed to locate region type %d.\n",
157 type);
158 return NULL;
161 if (lookup_region_by_name(cr->name, r) < 0)
162 return NULL;
164 return cr;
/* Read the mrc_metadata at offset 0 of rdev into *md and validate its
 * signature and header checksum. On success the rdev is re-chained to
 * cover exactly header + data. Returns 0 on success, -1 on any failure. */
static int mrc_header_valid(struct region_device *rdev, struct mrc_metadata *md)
{
	uint16_t checksum;
	uint16_t checksum_result;
	size_t size;

	if (rdev_readat(rdev, md, 0, sizeof(*md)) < 0) {
		printk(BIOS_ERR, "MRC: couldn't read metadata\n");
		return -1;
	}

	if (md->signature != MRC_DATA_SIGNATURE) {
		printk(BIOS_ERR, "MRC: invalid header signature\n");
		return -1;
	}

	/* Compute checksum over header with 0 as the value. */
	checksum = md->header_checksum;
	md->header_checksum = 0;
	checksum_result = compute_ip_checksum(md, sizeof(*md));

	if (checksum != checksum_result) {
		printk(BIOS_ERR, "MRC: header checksum mismatch: %x vs %x\n",
			checksum, checksum_result);
		return -1;
	}

	/* Put back original. */
	md->header_checksum = checksum;

	/* Re-size the region device according to the metadata as a region_file
	 * does block allocation. */
	size = sizeof(*md) + md->data_size;
	if (rdev_chain(rdev, rdev, 0, size) < 0) {
		printk(BIOS_ERR, "MRC: size exceeds rdev size: %zx vs %zx\n",
			size, region_device_sz(rdev));
		return -1;
	}

	return 0;
}
209 static int mrc_data_valid(int type, const struct mrc_metadata *md,
210 void *data, size_t data_size)
212 uint16_t checksum;
213 const struct cache_region *cr = lookup_region_type(type);
214 uint32_t hash_idx;
216 if (cr == NULL)
217 return -1;
219 if (md->data_size != data_size)
220 return -1;
222 hash_idx = cr->tpm_hash_index;
223 if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM)) {
224 if (!mrc_cache_verify_hash(hash_idx, data, data_size))
225 return -1;
226 } else {
227 checksum = compute_ip_checksum(data, data_size);
229 if (md->data_checksum != checksum) {
230 printk(BIOS_ERR, "MRC: data checksum mismatch: %x vs %x\n",
231 md->data_checksum, checksum);
232 return -1;
236 return 0;
/* Initialize the region_file over backing_rdev and chain rdev to the latest
 * slot, validating its header into *md. When fail_bad_data is false, a
 * missing/invalid slot is not an error (rdev is left 0-sized); callers in
 * the update path rely on that to overwrite bad slots. Returns 0 or -1. */
static int mrc_cache_get_latest_slot_info(const char *name,
					  const struct region_device *backing_rdev,
					  struct mrc_metadata *md,
					  struct region_file *cache_file,
					  struct region_device *rdev,
					  bool fail_bad_data)
{
	/* Init and obtain a handle to the file data. */
	if (region_file_init(cache_file, backing_rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid in '%s'\n", name);
		return -1;
	}

	/* Provide a 0 sized region_device from here on out so the caller
	 * has a valid yet unusable region_device. */
	rdev_chain(rdev, backing_rdev, 0, 0);

	/* No data to return. */
	if (region_file_data(cache_file, rdev) < 0) {
		printk(BIOS_ERR, "MRC: no data in '%s'\n", name);
		return fail_bad_data ? -1 : 0;
	}

	/* Validate header and resize region to reflect actual usage on the
	 * saved medium (including metadata and data). */
	if (mrc_header_valid(rdev, md) < 0) {
		printk(BIOS_ERR, "MRC: invalid header in '%s'\n", name);
		return fail_bad_data ? -1 : 0;
	}

	return 0;
}
/* Locate the current cache entry of 'type' matching 'version'. On success
 * rdev covers only the data (metadata stripped) and *md holds the header.
 * Returns 0 on success, -1 on mismatch, bad data, or forced retrain. */
static int mrc_cache_find_current(int type, uint32_t version,
				  struct region_device *rdev,
				  struct mrc_metadata *md)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_file cache_file;
	size_t data_size;
	const size_t md_size = sizeof(*md);
	const bool fail_bad_data = true;

	/*
	 * In recovery mode, force retraining if the memory retrain
	 * switch is set.
	 */
	if (CONFIG(VBOOT_STARTS_IN_BOOTBLOCK) && vboot_recovery_mode_enabled()
	    && get_recovery_mode_retrain_switch())
		return -1;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return -1;

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return -1;

	if (mrc_cache_get_latest_slot_info(cr->name,
					   &read_rdev,
					   md,
					   &cache_file,
					   rdev,
					   fail_bad_data) < 0)
		return -1;

	if (version != md->version) {
		printk(BIOS_INFO, "MRC: version mismatch: %x vs %x\n",
		       md->version, version);
		return -1;
	}

	/* Re-size rdev to only contain the data. i.e. remove metadata. */
	data_size = md->data_size;
	return rdev_chain(rdev, rdev, md_size, data_size);
}
319 ssize_t mrc_cache_load_current(int type, uint32_t version, void *buffer,
320 size_t buffer_size)
322 struct region_device rdev;
323 struct mrc_metadata md;
324 ssize_t data_size;
326 if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
327 return -1;
329 data_size = region_device_sz(&rdev);
330 if (buffer_size < data_size)
331 return -1;
333 if (rdev_readat(&rdev, buffer, 0, data_size) != data_size)
334 return -1;
336 if (mrc_data_valid(type, &md, buffer, data_size) < 0)
337 return -1;
339 return data_size;
342 void *mrc_cache_current_mmap_leak(int type, uint32_t version,
343 size_t *data_size)
345 struct region_device rdev;
346 void *data;
347 size_t region_device_size;
348 struct mrc_metadata md;
350 if (mrc_cache_find_current(type, version, &rdev, &md) < 0)
351 return NULL;
353 region_device_size = region_device_sz(&rdev);
354 if (data_size)
355 *data_size = region_device_size;
356 data = rdev_mmap_full(&rdev);
358 if (data == NULL) {
359 printk(BIOS_INFO, "MRC: mmap failure.\n");
360 return NULL;
363 if (mrc_data_valid(type, &md, data, region_device_size) < 0)
364 return NULL;
366 return data;
369 static bool mrc_cache_needs_update(const struct region_device *rdev,
370 const struct mrc_metadata *new_md,
371 const void *new_data, size_t new_data_size)
373 void *mapping, *data_mapping;
374 size_t old_data_size = region_device_sz(rdev) - sizeof(struct mrc_metadata);
375 bool need_update = false;
377 if (new_data_size != old_data_size)
378 return true;
380 mapping = rdev_mmap_full(rdev);
381 if (mapping == NULL) {
382 printk(BIOS_ERR, "MRC: cannot mmap existing cache.\n");
383 return true;
385 data_mapping = mapping + sizeof(struct mrc_metadata);
387 /* we need to compare the md and the data separately */
388 /* check the mrc_metadata */
389 if (memcmp(new_md, mapping, sizeof(struct mrc_metadata)))
390 need_update = true;
392 /* check the data */
393 if (!need_update && memcmp(new_data, data_mapping, new_data_size))
394 need_update = true;
396 rdev_munmap(rdev, mapping);
398 return need_update;
401 static void log_event_cache_update(uint8_t slot, enum result res)
403 const int type = ELOG_TYPE_MEM_CACHE_UPDATE;
404 struct elog_event_mem_cache_update event = {
405 .slot = slot
408 /* Filter through interesting events only */
409 switch (res) {
410 case UPDATE_FAILURE:
411 event.status = ELOG_MEM_CACHE_UPDATE_STATUS_FAIL;
412 break;
413 case UPDATE_SUCCESS:
414 event.status = ELOG_MEM_CACHE_UPDATE_STATUS_SUCCESS;
415 break;
416 default:
417 return;
420 if (elog_add_event_raw(type, &event, sizeof(event)) < 0)
421 printk(BIOS_ERR, "Failed to log mem cache update event.\n");
/* During ramstage this code purposefully uses incoherent transactions between
 * read and write. The read assumes a memory-mapped boot device that can be used
 * to quickly locate and compare the up-to-date data. However, when an update
 * is required it uses the writeable region access to perform the update. */
static void update_mrc_cache_by_type(int type,
				     struct mrc_metadata *new_md,
				     const void *new_data,
				     size_t new_data_size)
{
	const struct cache_region *cr;
	struct region region;
	struct region_device read_rdev;
	struct region_device write_rdev;
	struct region_file cache_file;
	struct mrc_metadata md;
	struct incoherent_rdev backing_irdev;
	const struct region_device *backing_rdev;
	struct region_device latest_rdev;
	/* Do not fail on a missing/invalid slot; it will be overwritten. */
	const bool fail_bad_data = false;
	uint32_t hash_idx;

	cr = lookup_region(&region, type);

	if (cr == NULL)
		return;

	printk(BIOS_DEBUG, "MRC: Checking cached data update for '%s'.\n",
	       cr->name);

	if (boot_device_ro_subregion(&region, &read_rdev) < 0)
		return;

	if (boot_device_rw_subregion(&region, &write_rdev) < 0)
		return;

	/* Combine RO (fast compare) and RW (update) views of the region. */
	backing_rdev = incoherent_rdev_init(&backing_irdev, &region, &read_rdev,
					    &write_rdev);

	if (backing_rdev == NULL)
		return;

	/* Note that mrc_cache_get_latest_slot_info doesn't check the
	 * validity of the current slot. If the slot is invalid,
	 * we'll overwrite it anyway when we update the mrc_cache.
	 */
	if (mrc_cache_get_latest_slot_info(cr->name,
					   backing_rdev,
					   &md,
					   &cache_file,
					   &latest_rdev,
					   fail_bad_data) < 0)
		return;

	if (!mrc_cache_needs_update(&latest_rdev,
				    new_md, new_data, new_data_size)) {
		printk(BIOS_DEBUG, "MRC: '%s' does not need update.\n", cr->name);
		log_event_cache_update(cr->elog_slot, ALREADY_UPTODATE);
		return;
	}

	printk(BIOS_DEBUG, "MRC: cache data '%s' needs update.\n", cr->name);

	/* Write header and data as one logical update (two scatter entries). */
	struct update_region_file_entry entries[] = {
		[0] = {
			.size = sizeof(*new_md),
			.data = new_md,
		},
		[1] = {
			.size = new_data_size,
			.data = new_data,
		},
	};
	if (region_file_update_data_arr(&cache_file, entries, ARRAY_SIZE(entries)) < 0) {
		printk(BIOS_ERR, "MRC: failed to update '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_FAILURE);
	} else {
		printk(BIOS_DEBUG, "MRC: updated '%s'.\n", cr->name);
		log_event_cache_update(cr->elog_slot, UPDATE_SUCCESS);
		hash_idx = cr->tpm_hash_index;
		if (hash_idx && CONFIG(MRC_SAVE_HASH_IN_TPM))
			mrc_cache_update_hash(hash_idx, new_data, new_data_size);
	}
}
509 /* Read flash status register to determine if write protect is active */
510 static int nvm_is_write_protected(void)
512 u8 sr1;
513 u8 wp_gpio;
514 u8 wp_spi;
516 if (!CONFIG(CHROMEOS))
517 return 0;
519 if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
520 return 0;
522 /* Read Write Protect GPIO if available */
523 wp_gpio = get_write_protect_state();
525 /* Read Status Register 1 */
526 if (spi_flash_status(boot_device_spi_flash(), &sr1) < 0) {
527 printk(BIOS_ERR, "Failed to read SPI status register 1\n");
528 return -1;
530 wp_spi = !!(sr1 & 0x80);
532 printk(BIOS_DEBUG, "SPI flash protection: WPSW=%d SRP0=%d\n",
533 wp_gpio, wp_spi);
535 return wp_gpio && wp_spi;
538 /* Apply protection to a range of flash */
539 static int nvm_protect(const struct region *r)
541 if (!CONFIG(MRC_SETTINGS_PROTECT))
542 return 0;
544 if (!CONFIG(BOOT_DEVICE_SPI_FLASH))
545 return 0;
547 return spi_flash_ctrlr_protect_region(boot_device_spi_flash(), r, WRITE_PROTECT);
550 /* Protect mrc region with a Protected Range Register */
551 static int protect_mrc_cache(const char *name)
553 struct region region;
555 if (!CONFIG(MRC_SETTINGS_PROTECT))
556 return 0;
558 if (lookup_region_by_name(name, &region) < 0) {
559 printk(BIOS_INFO, "MRC: Could not find region '%s'\n", name);
560 return -1;
563 if (nvm_is_write_protected() <= 0) {
564 printk(BIOS_INFO, "MRC: NOT enabling PRR for '%s'.\n", name);
565 return 0;
568 if (nvm_protect(&region) < 0) {
569 printk(BIOS_ERR, "MRC: ERROR setting PRR for '%s'.\n", name);
570 return -1;
573 printk(BIOS_INFO, "MRC: Enabled Protected Range on '%s'.\n", name);
574 return 0;
577 static void protect_mrc_region(void)
580 * Check if there is a single unified region that encompasses both
581 * RECOVERY_MRC_CACHE and DEFAULT_MRC_CACHE. In that case protect the
582 * entire region using a single PRR.
584 * If we are not able to protect the entire region, try protecting
585 * individual regions next.
587 if (protect_mrc_cache(UNIFIED_MRC_CACHE) == 0)
588 return;
590 if (CONFIG(HAS_RECOVERY_MRC_CACHE))
591 protect_mrc_cache(RECOVERY_MRC_CACHE);
593 protect_mrc_cache(DEFAULT_MRC_CACHE);
/* Invalidate the normal-mode training cache while in recovery so the next
 * normal boot is forced to retrain memory. */
static void invalidate_normal_cache(void)
{
	struct region_file cache_file;
	struct region_device rdev;
	const char *name = DEFAULT_MRC_CACHE;
	const uint32_t invalid = ~MRC_DATA_SIGNATURE;

	/*
	 * If !HAS_RECOVERY_MRC_CACHE and VBOOT_STARTS_IN_ROMSTAGE is
	 * selected, this means that memory training occurs before
	 * verified boot (in RO), so normal mode cache does not need
	 * to be invalidated.
	 */
	if (!CONFIG(HAS_RECOVERY_MRC_CACHE) && CONFIG(VBOOT_STARTS_IN_ROMSTAGE))
		return;

	/* We only invalidate the normal cache in recovery mode. */
	if (!vboot_recovery_mode_enabled())
		return;

	/*
	 * For platforms with a recovery mrc_cache, no need to
	 * invalidate when retrain switch is not set.
	 */
	if (CONFIG(HAS_RECOVERY_MRC_CACHE) && !get_recovery_mode_retrain_switch())
		return;

	if (fmap_locate_area_as_rdev_rw(name, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: Couldn't find '%s' region. Invalidation failed\n",
			name);
		return;
	}

	if (region_file_init(&cache_file, &rdev) < 0) {
		printk(BIOS_ERR, "MRC: region file invalid for '%s'. Invalidation failed\n",
			name);
		return;
	}

	/* Push an update that consists of 4 bytes that is smaller than the
	 * MRC metadata as well as an invalid signature. */
	if (region_file_update_data(&cache_file, &invalid, sizeof(invalid)) < 0)
		printk(BIOS_ERR, "MRC: invalidation failed for '%s'.\n", name);
}
641 static void update_mrc_cache_from_cbmem(int type)
643 const struct cache_region *cr;
644 struct region region;
645 const struct cbmem_entry *to_be_updated;
647 cr = lookup_region(&region, type);
649 if (cr == NULL) {
650 printk(BIOS_INFO, "MRC: could not find cache_region type %d\n", type);
651 return;
654 to_be_updated = cbmem_entry_find(cr->cbmem_id);
656 if (to_be_updated == NULL) {
657 printk(BIOS_INFO, "MRC: No data in cbmem for '%s'.\n",
658 cr->name);
659 return;
662 update_mrc_cache_by_type(type,
663 /* pointer to mrc_cache entry metadata header */
664 cbmem_entry_start(to_be_updated),
665 /* pointer to start of mrc_cache entry data */
666 cbmem_entry_start(to_be_updated) +
667 sizeof(struct mrc_metadata),
668 /* size of just data portion of the entry */
669 cbmem_entry_size(to_be_updated) -
670 sizeof(struct mrc_metadata));
/* Boot-state callback: push any cbmem-stashed training/variable data to
 * flash, invalidate the normal cache when in recovery, and finally apply
 * write protection to the mrc_cache region(s). */
static void finalize_mrc_cache(void *unused)
{
	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		update_mrc_cache_from_cbmem(MRC_TRAINING_DATA);

		if (CONFIG(MRC_SETTINGS_VARIABLE_DATA))
			update_mrc_cache_from_cbmem(MRC_VARIABLE_DATA);
	}

	invalidate_normal_cache();

	protect_mrc_region();
}
/* Stash freshly-generated MRC data of 'type': build the metadata header,
 * then either copy header + data into cbmem (for later flushing in
 * ramstage) or write it to the mrc_cache region immediately.
 * Returns 0 on success (including "no region, skipped"), -1 on cbmem
 * allocation failure. */
int mrc_cache_stash_data(int type, uint32_t version, const void *data,
			 size_t size)
{
	const struct cache_region *cr;

	struct mrc_metadata md = {
		.signature = MRC_DATA_SIGNATURE,
		.data_size = size,
		.version = version,
		.data_checksum = compute_ip_checksum(data, size),
	};
	/* header_checksum is computed over the header with the field itself
	 * still zero (it was omitted from the initializer above). */
	md.header_checksum =
		compute_ip_checksum(&md, sizeof(md));

	if (CONFIG(MRC_STASH_TO_CBMEM)) {
		/* Store data in cbmem for use in ramstage */
		struct mrc_metadata *cbmem_md;
		size_t cbmem_size;
		cbmem_size = sizeof(*cbmem_md) + size;

		cr = lookup_region_type(type);
		if (cr == NULL) {
			printk(BIOS_INFO, "MRC: No region type found. Skip adding to cbmem for type %d.\n",
			       type);
			return 0;
		}

		cbmem_md = cbmem_add(cr->cbmem_id, cbmem_size);

		if (cbmem_md == NULL) {
			printk(BIOS_ERR, "MRC: failed to add '%s' to cbmem.\n",
			       cr->name);
			return -1;
		}

		memcpy(cbmem_md, &md, sizeof(*cbmem_md));
		/* cbmem_md + 1 is the pointer to the mrc_cache data */
		memcpy(cbmem_md + 1, data, size);
	} else {
		/* Otherwise store to mrc_cache right away */
		update_mrc_cache_by_type(type, &md, data, size);
	}
	return 0;
}
/*
 * Ensures MRC training data is stored into SPI after PCI enumeration is done.
 * Some implementations may require this to be later than others.
 */
#if CONFIG(MRC_WRITE_NV_LATE)
BOOT_STATE_INIT_ENTRY(BS_OS_RESUME_CHECK, BS_ON_ENTRY, finalize_mrc_cache, NULL);
#else
BOOT_STATE_INIT_ENTRY(BS_DEV_ENUMERATE, BS_ON_EXIT, finalize_mrc_cache, NULL);
#endif