/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/bootconf.h>
#include <sys/cpuvar.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/kobj_impl.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/promif.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/ucode.h>
#include <sys/x86_archext.h>
#include <sys/x_call.h>

/*
 * AMD-specific equivalence table
 */
static ucode_eqtbl_amd_t *ucode_eqtbl_amd;

/*
 * mcpu_ucode_info for the boot CPU.  Statically allocated.
 */
static struct cpu_ucode_info cpu_ucode_info0;

static ucode_file_t ucodefile;

static void* ucode_zalloc(processorid_t, size_t);
static void ucode_free(processorid_t, void *, size_t);

static int ucode_capable_amd(cpu_t *);
static int ucode_capable_intel(cpu_t *);

static ucode_errno_t ucode_extract_amd(ucode_update_t *, uint8_t *, int);
static ucode_errno_t ucode_extract_intel(ucode_update_t *, uint8_t *,
    int);

static void ucode_file_reset_amd(ucode_file_t *, processorid_t);
static void ucode_file_reset_intel(ucode_file_t *, processorid_t);

static uint32_t ucode_load_amd(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);
static uint32_t ucode_load_intel(ucode_file_t *, cpu_ucode_info_t *, cpu_t *);

static int ucode_equiv_cpu_amd(cpu_t *, uint16_t *);

static ucode_errno_t ucode_locate_amd(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);
static ucode_errno_t ucode_locate_intel(cpu_t *, cpu_ucode_info_t *,
    ucode_file_t *);

static ucode_errno_t ucode_match_amd(uint16_t, cpu_ucode_info_t *,
    ucode_file_amd_t *, int);
static ucode_errno_t ucode_match_intel(int, cpu_ucode_info_t *,
    ucode_header_intel_t *, ucode_ext_table_intel_t *);

static void ucode_read_rev_amd(cpu_ucode_info_t *);
static void ucode_read_rev_intel(cpu_ucode_info_t *);

static const struct ucode_ops ucode_amd = {
	MSR_AMD_PATCHLOADER,
	ucode_capable_amd,
	ucode_file_reset_amd,
	ucode_read_rev_amd,
	ucode_load_amd,
	ucode_validate_amd,
	ucode_extract_amd,
	ucode_locate_amd
};

static const struct ucode_ops ucode_intel = {
	MSR_INTC_UCODE_WRITE,
	ucode_capable_intel,
	ucode_file_reset_intel,
	ucode_read_rev_intel,
	ucode_load_intel,
	ucode_validate_intel,
	ucode_extract_intel,
	ucode_locate_intel
};
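
/*
 * Operations vector for the CPU vendor in use on this system.  It is
 * selected in ucode_check() and remains NULL when the vendor is not
 * supported, so consumers such as ucode_cleanup() check it before use.
 */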
const struct ucode_ops *ucode;

static const char ucode_failure_fmt[] =
    "cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
static const char ucode_success_fmt[] =
    "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";

/*
 * Force flag.  If set, the first microcode binary that matches
 * signature and platform id will be used for microcode update,
 * regardless of version.  Should only be used for debugging.
 */
int ucode_force_update = 0;

/*
 * Allocate space for mcpu_ucode_info in the machcpu structure
 * for all non-boot CPUs.
 */
void
ucode_alloc_space(cpu_t *cp)
{
	ASSERT(cp->cpu_id != 0);
	ASSERT(cp->cpu_m.mcpu_ucode_info == NULL);
	cp->cpu_m.mcpu_ucode_info =
	    kmem_zalloc(sizeof (*cp->cpu_m.mcpu_ucode_info), KM_SLEEP);
}

void
ucode_free_space(cpu_t *cp)
{
	ASSERT(cp->cpu_m.mcpu_ucode_info != NULL);
	ASSERT(cp->cpu_m.mcpu_ucode_info != &cpu_ucode_info0);
	kmem_free(cp->cpu_m.mcpu_ucode_info,
	    sizeof (*cp->cpu_m.mcpu_ucode_info));
	cp->cpu_m.mcpu_ucode_info = NULL;
}

/*
 * Called when we are done with microcode update on all processors to free up
 * space allocated for the microcode file.
 */
void
ucode_cleanup()
{
	if (ucode == NULL)
		return;

	ucode->file_reset(&ucodefile, -1);
}

/*
 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
 * allocated with BOP_ALLOC() and does not require a free.
 */
static void*
ucode_zalloc(processorid_t id, size_t size)
{
	if (id)
		return (kmem_zalloc(size, KM_NOSLEEP));

	/* BOP_ALLOC() failure results in panic */
	return (BOP_ALLOC(bootops, NULL, size, MMU_PAGESIZE));
}

static void
ucode_free(processorid_t id, void* buf, size_t size)
{
	if (id)
		kmem_free(buf, size);
}

/*
 * Check whether or not a processor is capable of microcode operations
 * Returns 1 if it is capable, 0 if not.
 *
 * At this point we only support microcode update for:
 * - Intel processors family 6 and above, and
 * - AMD processors family 0x10 and above.
 *
 * We also assume that we don't support a mix of Intel and
 * AMD processors in the same box.
 *
 * An i86xpv guest domain or VM can't update the microcode.
 */

#define	XPVDOMU_OR_HVM	\
	((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)

/*ARGSUSED*/
static int
ucode_capable_amd(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 0x10);
}

static int
ucode_capable_intel(cpu_t *cp)
{
	int hwenv = get_hwenv();

	if (XPVDOMU_OR_HVM)
		return (0);

	return (cpuid_getfamily(cp) >= 6);
}

/*
 * Called when it is no longer necessary to keep the microcode around,
 * or when the cached microcode doesn't match the CPU being processed.
 */
static void
ucode_file_reset_amd(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_amd_t *ucodefp = ufp->amd;

	if (ucodefp == NULL)
		return;

	ucode_free(id, ucodefp, sizeof (ucode_file_amd_t));
	ufp->amd = NULL;
}

static void
ucode_file_reset_intel(ucode_file_t *ufp, processorid_t id)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;
	int total_size, body_size;

	if (ucodefp == NULL || ucodefp->uf_header == NULL)
		return;

	total_size = UCODE_TOTAL_SIZE_INTEL(ucodefp->uf_header->uh_total_size);
	body_size = UCODE_BODY_SIZE_INTEL(ucodefp->uf_header->uh_body_size);
	if (ucodefp->uf_body) {
		ucode_free(id, ucodefp->uf_body, body_size);
		ucodefp->uf_body = NULL;
	}

	if (ucodefp->uf_ext_table) {
		int size = total_size - body_size - UCODE_HEADER_SIZE_INTEL;

		ucode_free(id, ucodefp->uf_ext_table, size);
		ucodefp->uf_ext_table = NULL;
	}

	ucode_free(id, ucodefp->uf_header, UCODE_HEADER_SIZE_INTEL);
	ucodefp->uf_header = NULL;
}

/*
 * Find the equivalent CPU id in the equivalence table.
 */
static int
ucode_equiv_cpu_amd(cpu_t *cp, uint16_t *eq_sig)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int offset = 0, cpi_sig = cpuid_getsig(cp);
	ucode_eqtbl_amd_t *eqtbl = ucode_eqtbl_amd;

	(void) snprintf(name, MAXPATHLEN, "/%s/%s/equivalence-table",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp));

	/*
	 * No kmem_zalloc() etc. available on boot cpu.
	 */
	if (cp->cpu_id == 0) {
		if ((fd = kobj_open(name)) == -1)
			return (EM_OPENFILE);
		/* ucode_zalloc() cannot fail on boot cpu */
		eqtbl = ucode_zalloc(cp->cpu_id, sizeof (*eqtbl));
		ASSERT(eqtbl);
		do {
			count = kobj_read(fd, (int8_t *)eqtbl,
			    sizeof (*eqtbl), offset);
			if (count != sizeof (*eqtbl)) {
				(void) kobj_close(fd);
				return (EM_HIGHERREV);
			}
			offset += count;
		} while (eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig);
		(void) kobj_close(fd);
	}

	/*
	 * If not already done, load the equivalence table.
	 * Not done on boot CPU.
	 */
	if (eqtbl == NULL) {
		struct _buf *eq;
		uint64_t size;

		if ((eq = kobj_open_file(name)) == (struct _buf *)-1)
			return (EM_OPENFILE);

		if (kobj_get_filesize(eq, &size) < 0) {
			kobj_close_file(eq);
			return (EM_OPENFILE);
		}

		ucode_eqtbl_amd = kmem_zalloc(size, KM_NOSLEEP);
		if (ucode_eqtbl_amd == NULL) {
			kobj_close_file(eq);
			return (EM_NOMEM);
		}

		count = kobj_read_file(eq, (char *)ucode_eqtbl_amd, size, 0);
		kobj_close_file(eq);

		if (count != size)
			return (EM_FILESIZE);
	}

	/* Get the equivalent CPU id. */
	if (cp->cpu_id)
		for (eqtbl = ucode_eqtbl_amd;
		    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != cpi_sig;
		    eqtbl++)
			;

	*eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (*eq_sig == 0)
		return (EM_HIGHERREV);

	return (EM_OK);
}

/*
 * xVM cannot check for the presence of PCI devices. Look for chipset-
 * specific microcode patches in the container file and disable them
 * by setting their CPU revision to an invalid value.
 */

/*
 * Populate the ucode file structure from the microcode file corresponding to
 * this CPU, if one exists.
 *
 * Return EM_OK on success, corresponding error code on failure.
 */
/*ARGSUSED*/
static ucode_errno_t
ucode_locate_amd(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count, rc;
	ucode_file_amd_t *ucodefp = ufp->amd;

	uint16_t eq_sig = 0;
	int i;

	/* get equivalent CPU id */
	if ((rc = ucode_equiv_cpu_amd(cp, &eq_sig)) != EM_OK)
		return (rc);

	/*
	 * Allocate a buffer for the microcode patch. If the buffer has been
	 * allocated before, check for a matching microcode to avoid loading
	 * the file again.
	 */
	if (ucodefp == NULL)
		ucodefp = ucode_zalloc(cp->cpu_id, sizeof (*ucodefp));
	else if (ucode_match_amd(eq_sig, uinfop, ucodefp, sizeof (*ucodefp))
	    == EM_OK)
		return (EM_OK);

	if (ucodefp == NULL)
		return (EM_NOMEM);

	ufp->amd = ucodefp;

	/*
	 * Find the patch for this CPU. The patch files are named XXXX-YY, where
	 * XXXX is the equivalent CPU id and YY is the running patch number.
	 * Patches specific to certain chipsets are guaranteed to have lower
	 * numbers than less specific patches, so we can just load the first
	 * patch that matches.
	 */

	for (i = 0; i < 0xff; i++) {
		(void) snprintf(name, MAXPATHLEN, "/%s/%s/%04X-%02X",
		    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), eq_sig, i);
		if ((fd = kobj_open(name)) == -1)
			return (EM_NOMATCH);
		count = kobj_read(fd, (char *)ucodefp, sizeof (*ucodefp), 0);
		(void) kobj_close(fd);

		if (ucode_match_amd(eq_sig, uinfop, ucodefp, count) == EM_OK)
			return (EM_OK);
	}
	return (EM_NOMATCH);
}

static ucode_errno_t
ucode_locate_intel(cpu_t *cp, cpu_ucode_info_t *uinfop, ucode_file_t *ufp)
{
	char name[MAXPATHLEN];
	intptr_t fd;
	int count;
	int header_size = UCODE_HEADER_SIZE_INTEL;
	int cpi_sig = cpuid_getsig(cp);
	ucode_errno_t rc = EM_OK;
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	/*
	 * If the microcode matches the CPU we are processing, use it.
	 */
	if (ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table) == EM_OK && ucodefp->uf_body != NULL) {
		return (EM_OK);
	}

	/*
	 * Look for microcode file with the right name.
	 */
	(void) snprintf(name, MAXPATHLEN, "/%s/%s/%08X-%02X",
	    UCODE_INSTALL_PATH, cpuid_getvendorstr(cp), cpi_sig,
	    uinfop->cui_platid);
	if ((fd = kobj_open(name)) == -1) {
		return (EM_OPENFILE);
	}

	/*
	 * We found a microcode file for the CPU we are processing,
	 * reset the microcode data structure and read in the new
	 * file.
	 */
	ucode->file_reset(ufp, cp->cpu_id);

	ucodefp->uf_header = ucode_zalloc(cp->cpu_id, header_size);
	if (ucodefp->uf_header == NULL)
		return (EM_NOMEM);

	count = kobj_read(fd, (char *)ucodefp->uf_header, header_size, 0);

	switch (count) {
	case UCODE_HEADER_SIZE_INTEL: {

		ucode_header_intel_t *uhp = ucodefp->uf_header;
		uint32_t offset = header_size;
		int total_size, body_size, ext_size;
		uint32_t sum = 0;

		/*
		 * Make sure that the header contains valid fields.
		 */
		if ((rc = ucode_header_validate_intel(uhp)) == EM_OK) {
			total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
			ucodefp->uf_body = ucode_zalloc(cp->cpu_id, body_size);
			if (ucodefp->uf_body == NULL) {
				rc = EM_NOMEM;
				break;
			}

			if (kobj_read(fd, (char *)ucodefp->uf_body,
			    body_size, offset) != body_size)
				rc = EM_FILESIZE;
		}

		if (rc)
			break;

		sum = ucode_checksum_intel(0, header_size,
		    (uint8_t *)ucodefp->uf_header);
		if (ucode_checksum_intel(sum, body_size, ucodefp->uf_body)) {
			rc = EM_CHECKSUM;
			break;
		}

		/*
		 * Check to see if there is an extended signature table.
		 */
		offset = body_size + header_size;
		ext_size = total_size - offset;

		if (ext_size <= 0)
			break;

		ucodefp->uf_ext_table = ucode_zalloc(cp->cpu_id, ext_size);
		if (ucodefp->uf_ext_table == NULL) {
			rc = EM_NOMEM;
			break;
		}

		if (kobj_read(fd, (char *)ucodefp->uf_ext_table,
		    ext_size, offset) != ext_size) {
			rc = EM_FILESIZE;
		} else if (ucode_checksum_intel(0, ext_size,
		    (uint8_t *)(ucodefp->uf_ext_table))) {
			rc = EM_CHECKSUM;
		} else {
			int i;

			ext_size -= UCODE_EXT_TABLE_SIZE_INTEL;
			for (i = 0; i < ucodefp->uf_ext_table->uet_count;
			    i++) {
				if (ucode_checksum_intel(0,
				    UCODE_EXT_SIG_SIZE_INTEL,
				    (uint8_t *)(&(ucodefp->uf_ext_table->
				    uet_ext_sig[i])))) {
					rc = EM_CHECKSUM;
					break;
				}
			}
		}
		break;
	}

	default:
		rc = EM_FILESIZE;
		break;
	}

	kobj_close(fd);

	if (rc != EM_OK)
		return (rc);

	rc = ucode_match_intel(cpi_sig, uinfop, ucodefp->uf_header,
	    ucodefp->uf_ext_table);

	return (rc);
}

static ucode_errno_t
ucode_match_amd(uint16_t eq_sig, cpu_ucode_info_t *uinfop,
    ucode_file_amd_t *ucodefp, int size)
{
	ucode_header_amd_t *uh;

	if (ucodefp == NULL || size < sizeof (ucode_header_amd_t))
		return (EM_NOMATCH);

	uh = &ucodefp->uf_header;

	/*
	 * Don't even think about loading patches that would require code
	 * execution. Does not apply to patches for family 0x14 and beyond.
	 */
	if (uh->uh_cpu_rev < 0x5000 &&
	    size > offsetof(ucode_file_amd_t, uf_code_present) &&
	    ucodefp->uf_code_present)
		return (EM_NOMATCH);

	if (eq_sig != uh->uh_cpu_rev)
		return (EM_NOMATCH);

	if (uh->uh_nb_id) {
		cmn_err(CE_WARN, "ignoring northbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_nb_id, uh->uh_nb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_sb_id) {
		cmn_err(CE_WARN, "ignoring southbridge-specific ucode: "
		    "chipset id %x, revision %x", uh->uh_sb_id, uh->uh_sb_rev);
		return (EM_NOMATCH);
	}

	if (uh->uh_patch_id <= uinfop->cui_rev && !ucode_force_update)
		return (EM_HIGHERREV);

	return (EM_OK);
}

/*
 * Returns EM_OK if the microcode matches this processor, EM_HIGHERREV if it
 * matches but is not newer than the running revision, and EM_NOMATCH
 * otherwise.
 */
static ucode_errno_t
ucode_match_intel(int cpi_sig, cpu_ucode_info_t *uinfop,
    ucode_header_intel_t *uhp, ucode_ext_table_intel_t *uetp)
{
	if (uhp == NULL)
		return (EM_NOMATCH);

	if (UCODE_MATCH_INTEL(cpi_sig, uhp->uh_signature,
	    uinfop->cui_platid, uhp->uh_proc_flags)) {

		if (uinfop->cui_rev >= uhp->uh_rev && !ucode_force_update)
			return (EM_HIGHERREV);

		return (EM_OK);
	}

	if (uetp != NULL) {
		int i;

		for (i = 0; i < uetp->uet_count; i++) {
			ucode_ext_sig_intel_t *uesp;

			uesp = &uetp->uet_ext_sig[i];

			if (UCODE_MATCH_INTEL(cpi_sig, uesp->ues_signature,
			    uinfop->cui_platid, uesp->ues_proc_flags)) {

				if (uinfop->cui_rev >= uhp->uh_rev &&
				    !ucode_force_update)
					return (EM_HIGHERREV);

				return (EM_OK);
			}
		}
	}

	return (EM_NOMATCH);
}

/*ARGSUSED*/
static int
ucode_write(xc_arg_t arg1, xc_arg_t unused2, xc_arg_t unused3)
{
	ucode_update_t *uusp = (ucode_update_t *)arg1;
	cpu_ucode_info_t *uinfop = CPU->cpu_m.mcpu_ucode_info;
	on_trap_data_t otd;

	ASSERT(ucode);
	ASSERT(uusp->ucodep);

	/*
	 * Check one more time to see if it is really necessary to update
	 * microcode just in case this is a hyperthreaded processor where
	 * the threads share the same microcode.
	 */
	if (!ucode_force_update) {
		ucode->read_rev(uinfop);
		uusp->new_rev = uinfop->cui_rev;
		if (uinfop->cui_rev >= uusp->expected_rev)
			return (0);
	}

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		/*
		 * On some platforms a cache invalidation is required for the
		 * ucode update to be successful due to the parts of the
		 * processor that the microcode is updating.
		 */
		invalidate_cache();
		wrmsr(ucode->write_msr, (uintptr_t)uusp->ucodep);
	}

	no_trap();
	ucode->read_rev(uinfop);
	uusp->new_rev = uinfop->cui_rev;

	return (0);
}

/*ARGSUSED*/
static uint32_t
ucode_load_amd(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_amd_t *ucodefp = ufp->amd;
	on_trap_data_t otd;

	ASSERT(ucode);
	ASSERT(ucodefp);

	kpreempt_disable();
	if (on_trap(&otd, OT_DATA_ACCESS)) {
		no_trap();
		kpreempt_enable();
		return (0);
	}
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp);
	no_trap();
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header.uh_patch_id);
}

/*ARGSUSED2*/
static uint32_t
ucode_load_intel(ucode_file_t *ufp, cpu_ucode_info_t *uinfop, cpu_t *cp)
{
	ucode_file_intel_t *ucodefp = &ufp->intel;

	ASSERT(ucode);

	kpreempt_disable();
	/*
	 * On some platforms a cache invalidation is required for the
	 * ucode update to be successful due to the parts of the
	 * processor that the microcode is updating.
	 */
	invalidate_cache();
	wrmsr(ucode->write_msr, (uintptr_t)ucodefp->uf_body);
	ucode->read_rev(uinfop);
	kpreempt_enable();

	return (ucodefp->uf_header->uh_rev);
}

static void
ucode_read_rev_amd(cpu_ucode_info_t *uinfop)
{
	uinfop->cui_rev = rdmsr(MSR_AMD_PATCHLEVEL);
}

static void
ucode_read_rev_intel(cpu_ucode_info_t *uinfop)
{
	struct cpuid_regs crs;

	/*
	 * The Intel 64 and IA-32 Architecture Software Developer's Manual
	 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
	 * execute cpuid to guarantee the correct reading of this register.
	 */
	wrmsr(MSR_INTC_UCODE_REV, 0);
	(void) __cpuid_insn(&crs);
	uinfop->cui_rev = (rdmsr(MSR_INTC_UCODE_REV) >> INTC_UCODE_REV_SHIFT);
}

static ucode_errno_t
ucode_extract_amd(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t *ptr = (uint32_t *)ucodep;
	ucode_eqtbl_amd_t *eqtbl;
	ucode_file_amd_t *ufp;
	int count;
	int higher = 0;
	ucode_errno_t rc = EM_NOMATCH;
	uint16_t eq_sig;
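
	/*
	 * As parsed below, the container is assumed to begin with a 32-bit
	 * magic number followed by a sequence of sections, each introduced
	 * by a 32-bit type and a 32-bit byte count.  The first section is
	 * the equivalence table; each subsequent section holds one
	 * microcode patch.
	 */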

	/* skip over magic number & equivalence table header */
	ptr += 2; size -= 8;

	count = *ptr++; size -= 4;
	for (eqtbl = (ucode_eqtbl_amd_t *)ptr;
	    eqtbl->ue_inst_cpu && eqtbl->ue_inst_cpu != uusp->sig;
	    eqtbl++)
		;

	eq_sig = eqtbl->ue_equiv_cpu;

	/* No equivalent CPU id found, assume outdated microcode file. */
	if (eq_sig == 0)
		return (EM_HIGHERREV);

	/* Use the first microcode patch that matches. */
	do {
		ptr += count >> 2; size -= count;

		if (!size)
			return (higher ? EM_HIGHERREV : EM_NOMATCH);

		ptr++; size -= 4;
		count = *ptr++; size -= 4;
		ufp = (ucode_file_amd_t *)ptr;

		rc = ucode_match_amd(eq_sig, &uusp->info, ufp, count);
		if (rc == EM_HIGHERREV)
			higher = 1;
	} while (rc != EM_OK);

	uusp->ucodep = (uint8_t *)ufp;
	uusp->usize = count;
	uusp->expected_rev = ufp->uf_header.uh_patch_id;

	return (EM_OK);
}

static ucode_errno_t
ucode_extract_intel(ucode_update_t *uusp, uint8_t *ucodep, int size)
{
	uint32_t header_size = UCODE_HEADER_SIZE_INTEL;
	int remaining;
	int found = 0;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */

	/*
	 * Go through the whole buffer in case there are
	 * multiple versions of matching microcode for this
	 * processor.
	 */
	for (remaining = size; remaining > 0; ) {
		int total_size, body_size, ext_size;
		uint8_t *curbuf = &ucodep[size - remaining];
		ucode_header_intel_t *uhp = (ucode_header_intel_t *)curbuf;
		ucode_ext_table_intel_t *uetp = NULL;
		ucode_errno_t tmprc;

		total_size = UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
		body_size = UCODE_BODY_SIZE_INTEL(uhp->uh_body_size);
		ext_size = total_size - (header_size + body_size);

		if (ext_size > 0)
			uetp = (ucode_ext_table_intel_t *)
			    &curbuf[header_size + body_size];

		tmprc = ucode_match_intel(uusp->sig, &uusp->info, uhp, uetp);

		/*
		 * Since we are searching through a big file
		 * containing microcode for pretty much all the
		 * processors, we are bound to get EM_NOMATCH
		 * at one point.  However, if we return
		 * EM_NOMATCH to users, it will really confuse
		 * them.  Therefore, if we ever find a match of
		 * a lower rev, we will set return code to
		 * EM_HIGHERREV.
		 */
		if (tmprc == EM_HIGHERREV)
			search_rc = EM_HIGHERREV;

		if (tmprc == EM_OK &&
		    uusp->expected_rev < uhp->uh_rev) {
			uusp->ucodep = (uint8_t *)&curbuf[header_size];
			uusp->usize =
			    UCODE_TOTAL_SIZE_INTEL(uhp->uh_total_size);
			uusp->expected_rev = uhp->uh_rev;
			found = 1;
		}

		remaining -= total_size;
	}

	if (!found)
		return (search_rc);

	return (EM_OK);
}

/*
 * Entry point to microcode update from the ucode_drv driver.
 *
 * Returns EM_OK on success, corresponding error code on failure.
 */
ucode_errno_t
ucode_update(uint8_t *ucodep, int size)
{
	int found = 0;
	processorid_t id;
	ucode_update_t cached = { 0 };
	ucode_update_t *cachedp = NULL;
	ucode_errno_t rc = EM_OK;
	ucode_errno_t search_rc = EM_NOMATCH; /* search result */
	cpuset_t cpuset;

	ASSERT(ucode);
	ASSERT(ucodep);
	CPUSET_ZERO(cpuset);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);

	for (id = 0; id < max_ncpus; id++) {
		cpu_t *cpu;
		ucode_update_t uus = { 0 };
		ucode_update_t *uusp = &uus;

		/*
		 * If there is no such CPU or it is not xcall ready, skip it.
		 */
		if ((cpu = cpu_get(id)) == NULL ||
		    !(cpu->cpu_flags & CPU_READY))
			continue;

		uusp->sig = cpuid_getsig(cpu);
		bcopy(cpu->cpu_m.mcpu_ucode_info, &uusp->info,
		    sizeof (uusp->info));

		/*
		 * If the current CPU has the same signature and platform
		 * id as the previous one we processed, reuse the information.
		 */
		if (cachedp && cachedp->sig == cpuid_getsig(cpu) &&
		    cachedp->info.cui_platid == uusp->info.cui_platid) {
			uusp->ucodep = cachedp->ucodep;
			uusp->expected_rev = cachedp->expected_rev;
			/*
			 * Intuitively we should check here to see whether the
			 * running microcode rev is >= the expected rev, and
			 * quit if it is.  But we choose to proceed with the
			 * xcall regardless of the running version so that
			 * the other threads in an HT processor can update
			 * the cpu_ucode_info structure in machcpu.
			 */
		} else if ((search_rc = ucode->extract(uusp, ucodep, size))
		    == EM_OK) {
			bcopy(uusp, &cached, sizeof (cached));
			cachedp = &cached;
			found = 1;
		}

		/* Nothing to do */
		if (uusp->ucodep == NULL)
			continue;

		CPUSET_ADD(cpuset, id);
		kpreempt_disable();
		xc_sync((xc_arg_t)uusp, 0, 0, CPUSET2BV(cpuset), ucode_write);
		kpreempt_enable();
		CPUSET_DEL(cpuset, id);
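
		/*
		 * Classify the result of the cross call: if the reported
		 * revision did not change and we were not forcing, the CPU
		 * was already running microcode at least as new as the
		 * update; if no revision was reported, or it differs from
		 * the expected one, the write failed; otherwise the update
		 * succeeded.
		 */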
		if (uusp->new_rev != 0 && uusp->info.cui_rev == uusp->new_rev &&
		    !ucode_force_update) {
			rc = EM_HIGHERREV;
		} else if ((uusp->new_rev == 0) || (uusp->expected_rev != 0 &&
		    uusp->expected_rev != uusp->new_rev)) {
			cmn_err(CE_WARN, ucode_failure_fmt,
			    id, uusp->info.cui_rev, uusp->expected_rev);
			rc = EM_UPDATE;
		} else {
			cmn_err(CE_CONT, ucode_success_fmt,
			    id, uusp->info.cui_rev, uusp->new_rev);
		}
	}

	mutex_exit(&cpu_lock);

	if (!found) {
		rc = search_rc;
	} else if (rc == EM_OK) {
		cpuid_post_ucodeadm();
	}

	return (rc);
}

/*
 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
 * This is the entry point from boot path where pointer to CPU structure
 * is available.
 *
 * cpuid_info must be initialized before ucode_check can be called.
 */
void
ucode_check(cpu_t *cp)
{
	cpu_ucode_info_t *uinfop;
	ucode_errno_t rc = EM_OK;
	uint32_t new_rev = 0;

	ASSERT(cp);
	/*
	 * Space statically allocated for BSP, ensure pointer is set
	 */
	if (cp->cpu_id == 0 && cp->cpu_m.mcpu_ucode_info == NULL)
		cp->cpu_m.mcpu_ucode_info = &cpu_ucode_info0;

	uinfop = cp->cpu_m.mcpu_ucode_info;
	ASSERT(uinfop);

	/* set up function pointers if not already done */
	if (!ucode)
		switch (cpuid_getvendor(cp)) {
		case X86_VENDOR_AMD:
			ucode = &ucode_amd;
			break;
		case X86_VENDOR_Intel:
			ucode = &ucode_intel;
			break;
		default:
			ucode = NULL;
			return;
		}

	if (!ucode->capable(cp))
		return;

	/*
	 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
	 * (Family 6, model 5 and above) and all processors after.
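	 *
	 * The platform id bits read from the MSR are kept in cui_platid as
	 * a one-hot mask so that they can be compared against the
	 * processor-flags mask carried in the Intel microcode header (see
	 * UCODE_MATCH_INTEL()).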
	 */
	if ((cpuid_getvendor(cp) == X86_VENDOR_Intel) &&
	    ((cpuid_getmodel(cp) >= 5) || (cpuid_getfamily(cp) > 6))) {
		uinfop->cui_platid = 1 << ((rdmsr(MSR_INTC_PLATFORM_ID) >>
		    INTC_PLATFORM_ID_SHIFT) & INTC_PLATFORM_ID_MASK);
	}

	ucode->read_rev(uinfop);

	/*
	 * Check to see if we need ucode update
	 */
	if ((rc = ucode->locate(cp, uinfop, &ucodefile)) == EM_OK) {
		new_rev = ucode->load(&ucodefile, uinfop, cp);

		if (uinfop->cui_rev != new_rev)
			cmn_err(CE_WARN, ucode_failure_fmt, cp->cpu_id,
			    uinfop->cui_rev, new_rev);
	}

	/*
	 * If we fail to find a match for any reason, free the file structure
	 * just in case we have read in a partial file.
	 *
	 * Since the scratch memory for holding the microcode for the boot CPU
	 * came from BOP_ALLOC, we will reset the data structure as if we
	 * never did the allocation so we don't have to keep track of this
	 * special chunk of memory.  We free the memory used for the rest
	 * of the CPUs in start_other_cpus().
	 */
	if (rc != EM_OK || cp->cpu_id == 0)
		ucode->file_reset(&ucodefile, cp->cpu_id);
}

/*
 * Returns microcode revision from the machcpu structure.
 */
ucode_errno_t
ucode_get_rev(uint32_t *revp)
{
	int i;

	ASSERT(ucode);
	ASSERT(revp);

	if (!ucode->capable(CPU))
		return (EM_NOTSUP);

	mutex_enter(&cpu_lock);
	for (i = 0; i < max_ncpus; i++) {
		cpu_t *cpu;

		if ((cpu = cpu_get(i)) == NULL)
			continue;

		revp[i] = cpu->cpu_m.mcpu_ucode_info->cui_rev;
	}
	mutex_exit(&cpu_lock);

	return (EM_OK);
}