4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2018, Joyent, Inc.
30 #include <sys/asm_linkage.h>
31 #include <sys/bootconf.h>
32 #include <sys/cpuvar.h>
33 #include <sys/cmn_err.h>
34 #include <sys/controlregs.h>
35 #include <sys/debug.h>
37 #include <sys/kobj_impl.h>
38 #include <sys/machsystm.h>
39 #include <sys/ontrap.h>
40 #include <sys/param.h>
41 #include <sys/machparam.h>
42 #include <sys/promif.h>
43 #include <sys/sysmacros.h>
44 #include <sys/systm.h>
45 #include <sys/types.h>
46 #include <sys/thread.h>
47 #include <sys/ucode.h>
48 #include <sys/x86_archext.h>
49 #include <sys/x_call.h>
/*
 * NOTE(review): extraction-damaged text — original source line numbers are
 * fused into the code and several lines (comment delimiters, struct-field
 * initializers, some prototypes) are missing. Kept byte-identical.
 *
 * File-scope state for the x86 microcode update framework: the cached AMD
 * equivalence table, the statically allocated boot-CPU ucode info, the
 * cached microcode file, forward declarations of the AMD/Intel helpers,
 * and the per-vendor ucode_ops dispatch tables.
 */
36 * AMD-specific equivalence table
54 static ucode_eqtbl_amd_t
*ucode_eqtbl_amd
;
57 * mcpu_ucode_info for the boot CPU. Statically allocated.
59 static struct cpu_ucode_info cpu_ucode_info0
;
61 static ucode_file_t ucodefile
;
/* Forward declarations for the vendor-specific implementations. */
63 static void* ucode_zalloc(processorid_t
, size_t);
64 static void ucode_free(processorid_t
, void *, size_t);
66 static int ucode_capable_amd(cpu_t
*);
67 static int ucode_capable_intel(cpu_t
*);
69 static ucode_errno_t
ucode_extract_amd(ucode_update_t
*, uint8_t *, int);
70 static ucode_errno_t
ucode_extract_intel(ucode_update_t
*, uint8_t *,
73 static void ucode_file_reset_amd(ucode_file_t
*, processorid_t
);
74 static void ucode_file_reset_intel(ucode_file_t
*, processorid_t
);
76 static uint32_t ucode_load_amd(ucode_file_t
*, cpu_ucode_info_t
*, cpu_t
*);
77 static uint32_t ucode_load_intel(ucode_file_t
*, cpu_ucode_info_t
*, cpu_t
*);
80 static int ucode_equiv_cpu_amd(cpu_t
*, uint16_t *);
82 static ucode_errno_t
ucode_locate_amd(cpu_t
*, cpu_ucode_info_t
*,
84 static ucode_errno_t
ucode_locate_intel(cpu_t
*, cpu_ucode_info_t
*,
87 static ucode_errno_t
ucode_match_amd(uint16_t, cpu_ucode_info_t
*,
88 ucode_file_amd_t
*, int);
89 static ucode_errno_t
ucode_match_intel(int, cpu_ucode_info_t
*,
90 ucode_header_intel_t
*, ucode_ext_table_intel_t
*);
92 static void ucode_read_rev_amd(cpu_ucode_info_t
*);
93 static void ucode_read_rev_intel(cpu_ucode_info_t
*);
/*
 * Per-vendor operation tables; the initializer bodies are partly missing
 * from this extraction (the Intel table visibly carries the write MSR and
 * the reset/read_rev/validate callbacks).
 */
95 static const struct ucode_ops ucode_amd
= {
106 static const struct ucode_ops ucode_intel
= {
107 MSR_INTC_UCODE_WRITE
,
109 ucode_file_reset_intel
,
110 ucode_read_rev_intel
,
112 ucode_validate_intel
,
/* Active ops table, selected at ucode_check() time by CPU vendor. */
117 const struct ucode_ops
*ucode
;
119 static const char ucode_failure_fmt
[] =
120 "cpu%d: failed to update microcode from version 0x%x to 0x%x\n";
121 static const char ucode_success_fmt
[] =
122 "?cpu%d: microcode has been updated from version 0x%x to 0x%x\n";
125 * Force flag. If set, the first microcode binary that matches
126 * signature and platform id will be used for microcode update,
127 * regardless of version. Should only be used for debugging.
129 int ucode_force_update
= 0;
/*
 * NOTE(review): extraction-damaged text (line numbers fused in, braces and
 * the return type line missing); kept byte-identical.
 *
 * ucode_alloc_space(): zero-allocates the per-CPU mcpu_ucode_info for a
 * non-boot CPU (KM_SLEEP, so it cannot fail). The boot CPU instead uses
 * the static cpu_ucode_info0 — hence the cpu_id != 0 assertion.
 */
132 * Allocate space for mcpu_ucode_info in the machcpu structure
133 * for all non-boot CPUs.
136 ucode_alloc_space(cpu_t
*cp
)
138 ASSERT(cp
->cpu_id
!= 0);
139 ASSERT(cp
->cpu_m
.mcpu_ucode_info
== NULL
);
140 cp
->cpu_m
.mcpu_ucode_info
=
141 kmem_zalloc(sizeof (*cp
->cpu_m
.mcpu_ucode_info
), KM_SLEEP
);
/*
 * NOTE(review): extraction-damaged text; kept byte-identical.
 *
 * ucode_free_space(): releases the mcpu_ucode_info allocated by
 * ucode_alloc_space() and clears the pointer. Asserts it is not freeing
 * the boot CPU's statically allocated cpu_ucode_info0.
 */
145 ucode_free_space(cpu_t
*cp
)
147 ASSERT(cp
->cpu_m
.mcpu_ucode_info
!= NULL
);
148 ASSERT(cp
->cpu_m
.mcpu_ucode_info
!= &cpu_ucode_info0
);
149 kmem_free(cp
->cpu_m
.mcpu_ucode_info
,
150 sizeof (*cp
->cpu_m
.mcpu_ucode_info
));
/* Null the pointer so a later double-free would trip the ASSERT above. */
151 cp
->cpu_m
.mcpu_ucode_info
= NULL
;
/*
 * NOTE(review): the enclosing function's name/signature is missing from
 * this extraction — presumably ucode_cleanup(); verify against the full
 * source. Only the cached-file reset call survives; id -1 selects the
 * kmem_free() path inside ucode_free() rather than the boot-CPU path.
 */
155 * Called when we are done with microcode update on all processors to free up
156 * space allocated for the microcode file.
164 ucode
->file_reset(&ucodefile
, -1);
/*
 * NOTE(review): extraction-damaged; the conditional that selects between
 * the two return paths (presumably testing the processor id) is missing —
 * confirm against the full source. Kept byte-identical.
 *
 * ucode_zalloc(): buffer allocator for ucode data. Non-boot path uses
 * kmem_zalloc(KM_NOSLEEP) and may return NULL; the boot-CPU path uses
 * BOP_ALLOC(), which panics on failure rather than returning NULL.
 */
168 * Allocate/free a buffer used to hold ucode data. Space for the boot CPU is
169 * allocated with BOP_ALLOC() and does not require a free.
172 ucode_zalloc(processorid_t id
, size_t size
)
175 return (kmem_zalloc(size
, KM_NOSLEEP
));
177 /* BOP_ALLOC() failure results in panic */
178 return (BOP_ALLOC(bootops
, NULL
, size
, MMU_PAGESIZE
));
/*
 * NOTE(review): extraction-damaged; the guard that skips freeing boot-CPU
 * (BOP_ALLOC'd) memory is not visible here — confirm against full source.
 *
 * ucode_free(): counterpart of ucode_zalloc() for the kmem path.
 */
182 ucode_free(processorid_t id
, void* buf
, size_t size
)
185 kmem_free(buf
, size
);
/*
 * NOTE(review): extraction-damaged text; the use site of XPVDOMU_OR_HVM
 * (presumably an early "return 0" when virtualized) is missing here.
 *
 * ucode_capable_amd(): returns nonzero iff this AMD CPU can take a
 * microcode update — family 0x10 or later, and (per the macro below)
 * not an unprivileged Xen domain or HVM guest.
 */
189 * Check whether or not a processor is capable of microcode operations
190 * Returns 1 if it is capable, 0 if not.
192 * At this point we only support microcode update for:
193 * - Intel processors family 6 and above, and
194 * - AMD processors family 0x10 and above.
196 * We also assume that we don't support a mix of Intel and
197 * AMD processors in the same box.
199 * An i86xpv guest domain or VM can't update the microcode.
/* True when running as a non-control Xen PV domain or any HVM guest. */
202 #define XPVDOMU_OR_HVM \
203 ((hwenv == HW_XEN_PV && !is_controldom()) || (hwenv & HW_VIRTUAL) != 0)
207 ucode_capable_amd(cpu_t
*cp
)
209 int hwenv
= get_hwenv();
214 return (cpuid_getfamily(cp
) >= 0x10);
/*
 * ucode_capable_intel(): Intel counterpart of ucode_capable_amd() —
 * family 6 and above. NOTE(review): the virtualization guard using
 * hwenv/XPVDOMU_OR_HVM is missing from this extraction.
 */
218 ucode_capable_intel(cpu_t
*cp
)
220 int hwenv
= get_hwenv();
225 return (cpuid_getfamily(cp
) >= 6);
/*
 * NOTE(review): extraction-damaged; the NULL check and the clearing of
 * ufp->amd that presumably follow are missing — confirm against source.
 *
 * ucode_file_reset_amd(): frees the cached AMD microcode patch buffer.
 * AMD caches the whole patch in one ucode_file_amd_t allocation.
 */
229 * Called when it is no longer necessary to keep the microcode around,
230 * or when the cached microcode doesn't match the CPU being processed.
233 ucode_file_reset_amd(ucode_file_t
*ufp
, processorid_t id
)
235 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
240 ucode_free(id
, ucodefp
, sizeof (ucode_file_amd_t
));
/*
 * NOTE(review): extraction-damaged text (line numbers fused in, closing
 * braces and an early-return missing); kept byte-identical.
 *
 * ucode_file_reset_intel(): frees the three separately allocated pieces
 * of a cached Intel microcode file — body, extended signature table, and
 * header — sizing each free from the header's total/body size fields.
 */
245 ucode_file_reset_intel(ucode_file_t
*ufp
, processorid_t id
)
247 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
248 int total_size
, body_size
;
/* Nothing cached: nothing to free. */
250 if (ucodefp
== NULL
|| ucodefp
->uf_header
== NULL
)
253 total_size
= UCODE_TOTAL_SIZE_INTEL(ucodefp
->uf_header
->uh_total_size
);
254 body_size
= UCODE_BODY_SIZE_INTEL(ucodefp
->uf_header
->uh_body_size
);
255 if (ucodefp
->uf_body
) {
256 ucode_free(id
, ucodefp
->uf_body
, body_size
);
257 ucodefp
->uf_body
= NULL
;
/* Extended signature table occupies whatever the body+header don't. */
260 if (ucodefp
->uf_ext_table
) {
261 int size
= total_size
- body_size
- UCODE_HEADER_SIZE_INTEL
;
263 ucode_free(id
, ucodefp
->uf_ext_table
, size
);
264 ucodefp
->uf_ext_table
= NULL
;
/* Header freed last — its size fields were needed above. */
267 ucode_free(id
, ucodefp
->uf_header
, UCODE_HEADER_SIZE_INTEL
);
268 ucodefp
->uf_header
= NULL
;
/*
 * NOTE(review): extraction-damaged text; the do { } opener, several error
 * branches and the loop increment of the final for() are missing. Kept
 * byte-identical.
 *
 * ucode_equiv_cpu_amd(): maps this CPU's cpuid signature to the
 * "equivalent CPU id" via /platform .../equivalence-table. On the boot
 * CPU the table is streamed record-by-record with kobj_read() (no kmem
 * allocator yet); on other CPUs the whole table is read once into
 * ucode_eqtbl_amd and cached. Returns EM_OK with *eq_sig set, or
 * EM_OPENFILE / EM_FILESIZE / EM_HIGHERREV on failure.
 */
272 * Find the equivalent CPU id in the equivalence table.
275 ucode_equiv_cpu_amd(cpu_t
*cp
, uint16_t *eq_sig
)
277 char name
[MAXPATHLEN
];
280 int offset
= 0, cpi_sig
= cpuid_getsig(cp
);
281 ucode_eqtbl_amd_t
*eqtbl
= ucode_eqtbl_amd
;
283 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/equivalence-table",
284 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
));
287 * No kmem_zalloc() etc. available on boot cpu.
289 if (cp
->cpu_id
== 0) {
290 if ((fd
= kobj_open(name
)) == -1)
291 return (EM_OPENFILE
);
292 /* ucode_zalloc() cannot fail on boot cpu */
293 eqtbl
= ucode_zalloc(cp
->cpu_id
, sizeof (*eqtbl
));
/* Read one record at a time until a match or end-of-table sentinel. */
296 count
= kobj_read(fd
, (int8_t *)eqtbl
,
297 sizeof (*eqtbl
), offset
);
298 if (count
!= sizeof (*eqtbl
)) {
299 (void) kobj_close(fd
);
300 return (EM_HIGHERREV
);
303 } while (eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= cpi_sig
);
304 (void) kobj_close(fd
);
308 * If not already done, load the equivalence table.
309 * Not done on boot CPU.
315 if ((eq
= kobj_open_file(name
)) == (struct _buf
*)-1)
316 return (EM_OPENFILE
);
318 if (kobj_get_filesize(eq
, &size
) < 0) {
320 return (EM_OPENFILE
);
323 ucode_eqtbl_amd
= kmem_zalloc(size
, KM_NOSLEEP
);
324 if (ucode_eqtbl_amd
== NULL
) {
329 count
= kobj_read_file(eq
, (char *)ucode_eqtbl_amd
, size
, 0);
333 return (EM_FILESIZE
);
336 /* Get the equivalent CPU id. */
338 for (eqtbl
= ucode_eqtbl_amd
;
339 eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= cpi_sig
;
343 *eq_sig
= eqtbl
->ue_equiv_cpu
;
345 /* No equivalent CPU id found, assume outdated microcode file. */
347 return (EM_HIGHERREV
);
/*
 * NOTE(review): extraction-damaged; the xVM chipset-patch comment below
 * belongs to a helper whose body is missing from this extraction, and
 * several returns / the buffer-reuse else-branch tail are missing too.
 *
 * ucode_locate_amd(): resolves the equivalent CPU id, (re)allocates the
 * patch buffer if needed, then probes candidate patch files named
 * "<eqid>-<nn>" under UCODE_INSTALL_PATH, stopping at the first one that
 * ucode_match_amd() accepts for this CPU and running revision.
 */
353 * xVM cannot check for the presence of PCI devices. Look for chipset-
354 * specific microcode patches in the container file and disable them
355 * by setting their CPU revision to an invalid value.
359 * Populate the ucode file structure from microcode file corresponding to
360 * this CPU, if exists.
362 * Return EM_OK on success, corresponding error code on failure.
366 ucode_locate_amd(cpu_t
*cp
, cpu_ucode_info_t
*uinfop
, ucode_file_t
*ufp
)
368 char name
[MAXPATHLEN
];
371 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
376 /* get equivalent CPU id */
377 if ((rc
= ucode_equiv_cpu_amd(cp
, &eq_sig
)) != EM_OK
)
381 * Allocate a buffer for the microcode patch. If the buffer has been
382 * allocated before, check for a matching microcode to avoid loading
386 ucodefp
= ucode_zalloc(cp
->cpu_id
, sizeof (*ucodefp
));
387 else if (ucode_match_amd(eq_sig
, uinfop
, ucodefp
, sizeof (*ucodefp
))
397 * Find the patch for this CPU. The patch files are named XXXX-YY, where
398 * XXXX is the equivalent CPU id and YY is the running patch number.
399 * Patches specific to certain chipsets are guaranteed to have lower
400 * numbers than less specific patches, so we can just load the first
401 * patch that matches.
404 for (i
= 0; i
< 0xff; i
++) {
405 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/%04X-%02X",
406 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
), eq_sig
, i
);
407 if ((fd
= kobj_open(name
)) == -1)
409 count
= kobj_read(fd
, (char *)ucodefp
, sizeof (*ucodefp
), 0);
410 (void) kobj_close(fd
);
/* First acceptable patch wins (see naming-order comment above). */
412 if (ucode_match_amd(eq_sig
, uinfop
, ucodefp
, count
) == EM_OK
)
/*
 * NOTE(review): extraction-damaged text — the switch() statement head,
 * several error-path returns/labels, loop increments and closing braces
 * are missing; kept byte-identical.
 *
 * ucode_locate_intel(): if the cached microcode already matches this
 * CPU, reuses it; otherwise opens the file named
 * "<sig>-<platid>" under UCODE_INSTALL_PATH, reads and validates the
 * header, body and optional extended signature table (each allocated
 * separately and verified with ucode_checksum_intel()), then re-runs the
 * match against the freshly loaded image.
 */
419 ucode_locate_intel(cpu_t
*cp
, cpu_ucode_info_t
*uinfop
, ucode_file_t
*ufp
)
421 char name
[MAXPATHLEN
];
424 int header_size
= UCODE_HEADER_SIZE_INTEL
;
425 int cpi_sig
= cpuid_getsig(cp
);
426 ucode_errno_t rc
= EM_OK
;
427 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
432 * If the microcode matches the CPU we are processing, use it.
434 if (ucode_match_intel(cpi_sig
, uinfop
, ucodefp
->uf_header
,
435 ucodefp
->uf_ext_table
) == EM_OK
&& ucodefp
->uf_body
!= NULL
) {
440 * Look for microcode file with the right name.
442 (void) snprintf(name
, MAXPATHLEN
, "/%s/%s/%08X-%02X",
443 UCODE_INSTALL_PATH
, cpuid_getvendorstr(cp
), cpi_sig
,
445 if ((fd
= kobj_open(name
)) == -1) {
446 return (EM_OPENFILE
);
450 * We found a microcode file for the CPU we are processing,
451 * reset the microcode data structure and read in the new
454 ucode
->file_reset(ufp
, cp
->cpu_id
);
456 ucodefp
->uf_header
= ucode_zalloc(cp
->cpu_id
, header_size
);
457 if (ucodefp
->uf_header
== NULL
)
460 count
= kobj_read(fd
, (char *)ucodefp
->uf_header
, header_size
, 0);
/* NOTE(review): the switch on `count` is missing its head here. */
463 case UCODE_HEADER_SIZE_INTEL
: {
465 ucode_header_intel_t
*uhp
= ucodefp
->uf_header
;
466 uint32_t offset
= header_size
;
467 int total_size
, body_size
, ext_size
;
471 * Make sure that the header contains valid fields.
473 if ((rc
= ucode_header_validate_intel(uhp
)) == EM_OK
) {
474 total_size
= UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
475 body_size
= UCODE_BODY_SIZE_INTEL(uhp
->uh_body_size
);
476 ucodefp
->uf_body
= ucode_zalloc(cp
->cpu_id
, body_size
);
477 if (ucodefp
->uf_body
== NULL
) {
482 if (kobj_read(fd
, (char *)ucodefp
->uf_body
,
483 body_size
, offset
) != body_size
)
/* Header + body must checksum to zero together. */
490 sum
= ucode_checksum_intel(0, header_size
,
491 (uint8_t *)ucodefp
->uf_header
);
492 if (ucode_checksum_intel(sum
, body_size
, ucodefp
->uf_body
)) {
498 * Check to see if there is extended signature table.
500 offset
= body_size
+ header_size
;
501 ext_size
= total_size
- offset
;
506 ucodefp
->uf_ext_table
= ucode_zalloc(cp
->cpu_id
, ext_size
);
507 if (ucodefp
->uf_ext_table
== NULL
) {
512 if (kobj_read(fd
, (char *)ucodefp
->uf_ext_table
,
513 ext_size
, offset
) != ext_size
) {
515 } else if (ucode_checksum_intel(0, ext_size
,
516 (uint8_t *)(ucodefp
->uf_ext_table
))) {
/* Also checksum each extended signature entry individually. */
521 ext_size
-= UCODE_EXT_TABLE_SIZE_INTEL
;
522 for (i
= 0; i
< ucodefp
->uf_ext_table
->uet_count
;
524 if (ucode_checksum_intel(0,
525 UCODE_EXT_SIG_SIZE_INTEL
,
526 (uint8_t *)(&(ucodefp
->uf_ext_table
->
546 rc
= ucode_match_intel(cpi_sig
, uinfop
, ucodefp
->uf_header
,
547 ucodefp
->uf_ext_table
);
/*
 * NOTE(review): extraction-damaged; the conditionals guarding the two
 * cmn_err() chipset warnings, the error-code returns between them, and
 * the final EM_OK return are missing. Kept byte-identical.
 *
 * ucode_match_amd(): decides whether a candidate AMD patch applies —
 * buffer must hold at least a header, patches requiring code execution
 * are rejected on pre-0x14 families, the equivalent CPU id must match,
 * chipset-specific (NB/SB) patches are warned about and skipped, and the
 * patch level must exceed the running revision unless ucode_force_update.
 */
553 ucode_match_amd(uint16_t eq_sig
, cpu_ucode_info_t
*uinfop
,
554 ucode_file_amd_t
*ucodefp
, int size
)
556 ucode_header_amd_t
*uh
;
558 if (ucodefp
== NULL
|| size
< sizeof (ucode_header_amd_t
))
561 uh
= &ucodefp
->uf_header
;
564 * Don't even think about loading patches that would require code
565 * execution. Does not apply to patches for family 0x14 and beyond.
567 if (uh
->uh_cpu_rev
< 0x5000 &&
568 size
> offsetof(ucode_file_amd_t
, uf_code_present
) &&
569 ucodefp
->uf_code_present
)
572 if (eq_sig
!= uh
->uh_cpu_rev
)
576 cmn_err(CE_WARN
, "ignoring northbridge-specific ucode: "
577 "chipset id %x, revision %x", uh
->uh_nb_id
, uh
->uh_nb_rev
);
582 cmn_err(CE_WARN
, "ignoring southbridge-specific ucode: "
583 "chipset id %x, revision %x", uh
->uh_sb_id
, uh
->uh_sb_rev
);
/* Same-or-lower revision is only acceptable under the force flag. */
587 if (uh
->uh_patch_id
<= uinfop
->cui_rev
&& !ucode_force_update
)
588 return (EM_HIGHERREV
);
/*
 * NOTE(review): extraction-damaged; the NULL checks, EM_OK returns, the
 * force-update condition continuation after line "623", and closing
 * braces are missing. The "Returns 1 ... 0 otherwise" comment fragment
 * contradicts the visible EM_* returns — presumably a stale upstream
 * comment; verify against the full source.
 *
 * ucode_match_intel(): matches the CPU signature/platform-id against the
 * microcode header first, then against each extended signature table
 * entry; a match with a revision not newer than the running one yields
 * EM_HIGHERREV unless ucode_force_update is set.
 */
594 * Returns 1 if the microcode is for this processor; 0 otherwise.
597 ucode_match_intel(int cpi_sig
, cpu_ucode_info_t
*uinfop
,
598 ucode_header_intel_t
*uhp
, ucode_ext_table_intel_t
*uetp
)
603 if (UCODE_MATCH_INTEL(cpi_sig
, uhp
->uh_signature
,
604 uinfop
->cui_platid
, uhp
->uh_proc_flags
)) {
606 if (uinfop
->cui_rev
>= uhp
->uh_rev
&& !ucode_force_update
)
607 return (EM_HIGHERREV
);
/* Primary signature missed: scan the extended signature table. */
615 for (i
= 0; i
< uetp
->uet_count
; i
++) {
616 ucode_ext_sig_intel_t
*uesp
;
618 uesp
= &uetp
->uet_ext_sig
[i
];
620 if (UCODE_MATCH_INTEL(cpi_sig
, uesp
->ues_signature
,
621 uinfop
->cui_platid
, uesp
->ues_proc_flags
)) {
623 if (uinfop
->cui_rev
>= uhp
->uh_rev
&&
625 return (EM_HIGHERREV
);
/*
 * NOTE(review): extraction-damaged; the otd declaration, the return after
 * the early-out revision check, the cache-invalidation call inside the
 * on_trap() block, no_trap() and the return value are missing.
 *
 * ucode_write(): cross-call handler run on each target CPU. Re-reads the
 * running revision (HT siblings share microcode, so the update may
 * already have happened), then writes the patch address to the vendor's
 * update MSR under on_trap() protection and reads back the new revision
 * into uusp->new_rev.
 */
637 ucode_write(xc_arg_t arg1
, xc_arg_t unused2
, xc_arg_t unused3
)
639 ucode_update_t
*uusp
= (ucode_update_t
*)arg1
;
640 cpu_ucode_info_t
*uinfop
= CPU
->cpu_m
.mcpu_ucode_info
;
644 ASSERT(uusp
->ucodep
);
647 * Check one more time to see if it is really necessary to update
648 * microcode just in case this is a hyperthreaded processor where
649 * the threads share the same microcode.
651 if (!ucode_force_update
) {
652 ucode
->read_rev(uinfop
);
653 uusp
->new_rev
= uinfop
->cui_rev
;
654 if (uinfop
->cui_rev
>= uusp
->expected_rev
)
/* A faulting WRMSR is caught by on_trap() rather than panicking. */
658 if (!on_trap(&otd
, OT_DATA_ACCESS
)) {
660 * On some platforms a cache invalidation is required for the
661 * ucode update to be successful due to the parts of the
662 * processor that the microcode is updating.
665 wrmsr(ucode
->write_msr
, (uintptr_t)uusp
->ucodep
);
669 ucode
->read_rev(uinfop
);
670 uusp
->new_rev
= uinfop
->cui_rev
;
/*
 * NOTE(review): extraction-damaged; the otd declaration, an apparent
 * boot-CPU/xcall split, the body of the on_trap() failure branch and
 * no_trap() are missing. Kept byte-identical.
 *
 * ucode_load_amd(): writes the cached AMD patch to the vendor update MSR
 * under on_trap() protection, refreshes the recorded revision, and
 * returns the patch's uh_patch_id so the caller can verify the load.
 */
677 ucode_load_amd(ucode_file_t
*ufp
, cpu_ucode_info_t
*uinfop
, cpu_t
*cp
)
679 ucode_file_amd_t
*ucodefp
= ufp
->amd
;
686 if (on_trap(&otd
, OT_DATA_ACCESS
)) {
691 wrmsr(ucode
->write_msr
, (uintptr_t)ucodefp
);
693 ucode
->read_rev(uinfop
);
696 return (ucodefp
->uf_header
.uh_patch_id
);
/*
 * NOTE(review): extraction-damaged; the kpreempt_disable/enable pair and
 * any cache-invalidation call hinted at by the comment are missing.
 *
 * ucode_load_intel(): points MSR_INTC_UCODE_WRITE at the microcode body,
 * refreshes the recorded revision, and returns the header's uh_rev for
 * the caller to compare against.
 */
701 ucode_load_intel(ucode_file_t
*ufp
, cpu_ucode_info_t
*uinfop
, cpu_t
*cp
)
703 ucode_file_intel_t
*ucodefp
= &ufp
->intel
;
709 * On some platforms a cache invalidation is required for the
710 * ucode update to be successful due to the parts of the
711 * processor that the microcode is updating.
714 wrmsr(ucode
->write_msr
, (uintptr_t)ucodefp
->uf_body
);
715 ucode
->read_rev(uinfop
);
718 return (ucodefp
->uf_header
->uh_rev
);
/*
 * ucode_read_rev_amd(): records the running AMD microcode patch level
 * (MSR_AMD_PATCHLEVEL) into the per-CPU ucode info.
 * NOTE(review): extraction dropped the return type and braces.
 */
724 ucode_read_rev_amd(cpu_ucode_info_t
*uinfop
)
726 uinfop
->cui_rev
= rdmsr(MSR_AMD_PATCHLEVEL
);
/*
 * ucode_read_rev_intel(): reads the running Intel microcode revision.
 * Per the SDM sequence quoted below: clear MSR_INTC_UCODE_REV, execute
 * CPUID, then read the MSR and shift the revision field down.
 * NOTE(review): the setup of crs (e.g. leaf selection) before
 * __cpuid_insn() is not visible in this extraction — verify.
 */
730 ucode_read_rev_intel(cpu_ucode_info_t
*uinfop
)
732 struct cpuid_regs crs
;
735 * The Intel 64 and IA-32 Architecture Software Developer's Manual
736 * recommends that MSR_INTC_UCODE_REV be loaded with 0 first, then
737 * execute cpuid to guarantee the correct reading of this register.
739 wrmsr(MSR_INTC_UCODE_REV
, 0);
740 (void) __cpuid_insn(&crs
);
741 uinfop
->cui_rev
= (rdmsr(MSR_INTC_UCODE_REV
) >> INTC_UCODE_REV_SHIFT
);
/*
 * NOTE(review): extraction-damaged; the magic-number validation, the
 * do{} opener, size-exhaustion checks, the `higher` bookkeeping and the
 * final EM_OK return are missing. Kept byte-identical.
 *
 * ucode_extract_amd(): walks a user-supplied AMD container image.
 * First resolves the equivalent CPU id from the embedded equivalence
 * table, then iterates the length-prefixed patch sections, accepting the
 * first patch ucode_match_amd() approves. On success points
 * uusp->ucodep into the caller's buffer and records expected_rev.
 */
745 ucode_extract_amd(ucode_update_t
*uusp
, uint8_t *ucodep
, int size
)
747 uint32_t *ptr
= (uint32_t *)ucodep
;
748 ucode_eqtbl_amd_t
*eqtbl
;
749 ucode_file_amd_t
*ufp
;
752 ucode_errno_t rc
= EM_NOMATCH
;
755 /* skip over magic number & equivalence table header */
758 count
= *ptr
++; size
-= 4;
759 for (eqtbl
= (ucode_eqtbl_amd_t
*)ptr
;
760 eqtbl
->ue_inst_cpu
&& eqtbl
->ue_inst_cpu
!= uusp
->sig
;
764 eq_sig
= eqtbl
->ue_equiv_cpu
;
766 /* No equivalent CPU id found, assume outdated microcode file. */
768 return (EM_HIGHERREV
);
770 /* Use the first microcode patch that matches. */
772 ptr
+= count
>> 2; size
-= count
;
775 return (higher
? EM_HIGHERREV
: EM_NOMATCH
);
/* Each section: 32-bit byte count followed by the patch payload. */
778 count
= *ptr
++; size
-= 4;
779 ufp
= (ucode_file_amd_t
*)ptr
;
781 rc
= ucode_match_amd(eq_sig
, &uusp
->info
, ufp
, count
);
782 if (rc
== EM_HIGHERREV
)
784 } while (rc
!= EM_OK
);
786 uusp
->ucodep
= (uint8_t *)ufp
;
788 uusp
->expected_rev
= ufp
->uf_header
.uh_patch_id
;
/*
 * NOTE(review): extraction-damaged; the alignment/size validation of each
 * chunk, the usize assignment around line "840", the final return and
 * closing braces are missing. Kept byte-identical.
 *
 * ucode_extract_intel(): scans a user-supplied buffer that may hold
 * many concatenated Intel microcode updates, remembering the best
 * (highest-rev) match for this CPU's signature/platform-id. EM_HIGHERREV
 * is latched into search_rc so a "you already have newer" result is
 * reported instead of a confusing EM_NOMATCH.
 */
794 ucode_extract_intel(ucode_update_t
*uusp
, uint8_t *ucodep
, int size
)
796 uint32_t header_size
= UCODE_HEADER_SIZE_INTEL
;
799 ucode_errno_t search_rc
= EM_NOMATCH
; /* search result */
802 * Go through the whole buffer in case there are
803 * multiple versions of matching microcode for this
806 for (remaining
= size
; remaining
> 0; ) {
807 int total_size
, body_size
, ext_size
;
808 uint8_t *curbuf
= &ucodep
[size
- remaining
];
809 ucode_header_intel_t
*uhp
= (ucode_header_intel_t
*)curbuf
;
810 ucode_ext_table_intel_t
*uetp
= NULL
;
813 total_size
= UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
814 body_size
= UCODE_BODY_SIZE_INTEL(uhp
->uh_body_size
);
815 ext_size
= total_size
- (header_size
+ body_size
);
/* Positive ext_size means an extended signature table follows. */
818 uetp
= (ucode_ext_table_intel_t
*)
819 &curbuf
[header_size
+ body_size
];
821 tmprc
= ucode_match_intel(uusp
->sig
, &uusp
->info
, uhp
, uetp
);
824 * Since we are searching through a big file
825 * containing microcode for pretty much all the
826 * processors, we are bound to get EM_NOMATCH
827 * at one point. However, if we return
828 * EM_NOMATCH to users, it will really confuse
829 * them. Therefore, if we ever find a match of
830 * a lower rev, we will set return code to
833 if (tmprc
== EM_HIGHERREV
)
834 search_rc
= EM_HIGHERREV
;
/* Keep only matches strictly newer than the best seen so far. */
836 if (tmprc
== EM_OK
&&
837 uusp
->expected_rev
< uhp
->uh_rev
) {
838 uusp
->ucodep
= (uint8_t *)&curbuf
[header_size
];
840 UCODE_TOTAL_SIZE_INTEL(uhp
->uh_total_size
);
841 uusp
->expected_rev
= uhp
->uh_rev
;
845 remaining
-= total_size
;
/*
 * NOTE(review): extraction-damaged; declarations of id/cpu/cpuset, the
 * extract-failure handling tail, `continue` statements, the success-path
 * bookkeeping around line "928", the final rc computation and return are
 * missing. Kept byte-identical.
 *
 * ucode_update(): ucode_drv entry point. Under cpu_lock, for every
 * xcall-ready CPU: reuse the previously extracted patch when signature
 * and platform id match the last CPU processed, otherwise extract a
 * fresh match from the caller's buffer; then xc_sync() ucode_write() to
 * that CPU and log success or failure by comparing new_rev against
 * expected_rev. Calls cpuid_post_ucodeadm() when an update succeeded.
 */
854 * Entry point to microcode update from the ucode_drv driver.
856 * Returns EM_OK on success, corresponding error code on failure.
859 ucode_update(uint8_t *ucodep
, int size
)
863 ucode_update_t cached
= { 0 };
864 ucode_update_t
*cachedp
= NULL
;
865 ucode_errno_t rc
= EM_OK
;
866 ucode_errno_t search_rc
= EM_NOMATCH
; /* search result */
873 if (!ucode
->capable(CPU
))
876 mutex_enter(&cpu_lock
);
878 for (id
= 0; id
< max_ncpus
; id
++) {
880 ucode_update_t uus
= { 0 };
881 ucode_update_t
*uusp
= &uus
;
884 * If there is no such CPU or it is not xcall ready, skip it.
886 if ((cpu
= cpu_get(id
)) == NULL
||
887 !(cpu
->cpu_flags
& CPU_READY
))
890 uusp
->sig
= cpuid_getsig(cpu
);
891 bcopy(cpu
->cpu_m
.mcpu_ucode_info
, &uusp
->info
,
892 sizeof (uusp
->info
));
895 * If the current CPU has the same signature and platform
896 * id as the previous one we processed, reuse the information.
898 if (cachedp
&& cachedp
->sig
== cpuid_getsig(cpu
) &&
899 cachedp
->info
.cui_platid
== uusp
->info
.cui_platid
) {
900 uusp
->ucodep
= cachedp
->ucodep
;
901 uusp
->expected_rev
= cachedp
->expected_rev
;
903 * Intuitively we should check here to see whether the
904 * running microcode rev is >= the expected rev, and
905 * quit if it is. But we choose to proceed with the
906 * xcall regardless of the running version so that
907 * the other threads in an HT processor can update
908 * the cpu_ucode_info structure in machcpu.
910 } else if ((search_rc
= ucode
->extract(uusp
, ucodep
, size
))
912 bcopy(uusp
, &cached
, sizeof (cached
));
918 if (uusp
->ucodep
== NULL
)
/* Single-CPU cross-call: only `id` is in the set for this pass. */
922 CPUSET_ADD(cpuset
, id
);
924 xc_sync((xc_arg_t
)uusp
, 0, 0, CPUSET2BV(cpuset
), ucode_write
);
926 CPUSET_DEL(cpuset
, id
);
928 if (uusp
->new_rev
!= 0 && uusp
->info
.cui_rev
== uusp
->new_rev
&&
929 !ucode_force_update
) {
931 } else if ((uusp
->new_rev
== 0) || (uusp
->expected_rev
!= 0 &&
932 uusp
->expected_rev
!= uusp
->new_rev
)) {
933 cmn_err(CE_WARN
, ucode_failure_fmt
,
934 id
, uusp
->info
.cui_rev
, uusp
->expected_rev
);
937 cmn_err(CE_CONT
, ucode_success_fmt
,
938 id
, uusp
->info
.cui_rev
, uusp
->new_rev
);
942 mutex_exit(&cpu_lock
);
946 } else if (rc
== EM_OK
) {
/* Re-derive cpuid state that the new microcode may have changed. */
947 cpuid_post_ucodeadm();
/*
 * NOTE(review): extraction-damaged; the AMD case of the vendor switch,
 * the default case, early returns and closing braces are missing.
 * Kept byte-identical.
 *
 * ucode_check(): boot-path entry. Binds the boot CPU to the static
 * cpu_ucode_info0, selects the vendor ops table, bails if the CPU is not
 * update-capable, derives the Intel platform id from
 * MSR_INTC_PLATFORM_ID where supported, reads the running revision,
 * then locates/loads any newer microcode and warns if the post-load
 * revision did not change. Finally resets the cached file on failure or
 * on the boot CPU (whose buffer came from BOP_ALLOC and is not freed).
 */
954 * Initialize mcpu_ucode_info, and perform microcode update if necessary.
955 * This is the entry point from boot path where pointer to CPU structure
958 * cpuid_info must be initialized before ucode_check can be called.
961 ucode_check(cpu_t
*cp
)
963 cpu_ucode_info_t
*uinfop
;
964 ucode_errno_t rc
= EM_OK
;
965 uint32_t new_rev
= 0;
969 * Space statically allocated for BSP, ensure pointer is set
971 if (cp
->cpu_id
== 0 && cp
->cpu_m
.mcpu_ucode_info
== NULL
)
972 cp
->cpu_m
.mcpu_ucode_info
= &cpu_ucode_info0
;
974 uinfop
= cp
->cpu_m
.mcpu_ucode_info
;
977 /* set up function pointers if not already done */
979 switch (cpuid_getvendor(cp
)) {
983 case X86_VENDOR_Intel
:
984 ucode
= &ucode_intel
;
991 if (!ucode
->capable(cp
))
995 * The MSR_INTC_PLATFORM_ID is supported in Celeron and Xeon
996 * (Family 6, model 5 and above) and all processors after.
998 if ((cpuid_getvendor(cp
) == X86_VENDOR_Intel
) &&
999 ((cpuid_getmodel(cp
) >= 5) || (cpuid_getfamily(cp
) > 6))) {
1000 uinfop
->cui_platid
= 1 << ((rdmsr(MSR_INTC_PLATFORM_ID
) >>
1001 INTC_PLATFORM_ID_SHIFT
) & INTC_PLATFORM_ID_MASK
);
1004 ucode
->read_rev(uinfop
);
1008 * Check to see if we need ucode update
1010 if ((rc
= ucode
->locate(cp
, uinfop
, &ucodefile
)) == EM_OK
) {
1011 new_rev
= ucode
->load(&ucodefile
, uinfop
, cp
);
/* NOTE(review): comparison direction relies on a missing
 * assignment/read between load and here — verify upstream. */
1013 if (uinfop
->cui_rev
!= new_rev
)
1014 cmn_err(CE_WARN
, ucode_failure_fmt
, cp
->cpu_id
,
1015 uinfop
->cui_rev
, new_rev
);
1019 * If we fail to find a match for any reason, free the file structure
1020 * just in case we have read in a partial file.
1022 * Since the scratch memory for holding the microcode for the boot CPU
1023 * came from BOP_ALLOC, we will reset the data structure as if we
1024 * never did the allocation so we don't have to keep track of this
1025 * special chunk of memory. We free the memory used for the rest
1026 * of the CPUs in start_other_cpus().
1028 if (rc
!= EM_OK
|| cp
->cpu_id
== 0)
1029 ucode
->file_reset(&ucodefile
, cp
->cpu_id
);
/*
 * NOTE(review): extraction-damaged; declarations of i/cpu, the
 * not-capable return value, a `continue` for absent CPUs, and the final
 * return are missing. Kept byte-identical.
 *
 * ucode_get_rev(): fills revp[] (indexed by CPU id, presumably sized for
 * max_ncpus by the caller — verify) with each present CPU's running
 * microcode revision, under cpu_lock so the CPU list is stable.
 */
1033 * Returns microcode revision from the machcpu structure.
1036 ucode_get_rev(uint32_t *revp
)
1043 if (!ucode
->capable(CPU
))
1046 mutex_enter(&cpu_lock
);
1047 for (i
= 0; i
< max_ncpus
; i
++) {
1050 if ((cpu
= cpu_get(i
)) == NULL
)
1053 revp
[i
] = cpu
->cpu_m
.mcpu_ucode_info
->cui_rev
;
1055 mutex_exit(&cpu_lock
);