/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

/*
 * LOCALITY GROUP (LGROUP) PLATFORM SUPPORT FOR X86/AMD64 PLATFORMS
 * ================================================================
 * Multiprocessor AMD and Intel systems may have Non Uniform Memory Access
 * (NUMA).  A NUMA machine consists of one or more "nodes" that each consist of
 * one or more CPUs and some local memory.  The CPUs in each node can access
 * the memory in the other nodes but at a higher latency than accessing their
 * local memory.  Typically, a system with only one node has Uniform Memory
 * Access (UMA), but it may be possible to have a one node system that has
 * some global memory outside of the node which is higher latency.
 *
 * Module Description
 * ------------------
 * This module provides a platform interface for determining which CPUs and
 * which memory (and how much) are in a NUMA node and how far each node is from
 * each other.  The interface is used by the Virtual Memory (VM) system and the
 * common lgroup framework.  The VM system uses the plat_*() routines to fill
 * in its memory node (memnode) array with the physical address range spanned
 * by each NUMA node to know which memory belongs to which node, so it can
 * build and manage a physical page free list for each NUMA node and allocate
 * local memory from each node as needed.  The common lgroup framework uses the
 * exported lgrp_plat_*() routines to figure out which CPUs and memory belong
 * to each node (leaf lgroup) and how far each node is from each other, so it
 * can build the latency (lgroup) topology for the machine in order to optimize
 * for locality.  Also, an lgroup platform handle is used instead of lgroups in
 * the interface with this module, so this module shouldn't need to know
 * anything about lgroups.  Instead, it just needs to know which CPUs, memory,
 * etc. are in each NUMA node, how far each node is from each other, and to use
 * a unique lgroup platform handle to refer to each node through the interface.
 *
 * Determining NUMA Configuration
 * ------------------------------
 * By default, this module will try to determine the NUMA configuration of the
 * machine by reading the ACPI System Resource Affinity Table (SRAT) and System
 * Locality Information Table (SLIT).  The SRAT contains info to tell which
 * CPUs and memory are local to a given proximity domain (NUMA node).  The SLIT
 * is a matrix that gives the distance between each system locality (which is
 * a NUMA node and should correspond to proximity domains in the SRAT).  For
 * more details on the SRAT and SLIT, please refer to an ACPI 3.0 or newer
 * specification.
 *
 * If the SRAT doesn't exist on a system with AMD Opteron processors, we
 * examine registers in PCI configuration space to determine how many nodes are
 * in the system and which CPUs and memory are in each node, which we
 * do while booting the kernel.
 *
 * NOTE: Using these PCI configuration space registers to determine this
 *	 locality info is not guaranteed to work or be compatible across all
 *	 Opteron processor families.
 *
 * If the SLIT does not exist or look right, the kernel will probe to determine
 * the distance between nodes as long as the NUMA CPU and memory configuration
 * has been determined (see lgrp_plat_probe() for details).
 *
 * Data Structures
 * ---------------
 * The main data structures used by this code are the following:
 *
 * - lgrp_plat_cpu_node[]		CPU to node ID mapping table indexed by
 *					CPU ID (only used for SRAT)
 *
 * - lgrp_plat_lat_stats.latencies[][]	Table of latencies between same and
 *					different nodes indexed by node ID
 *
 * - lgrp_plat_node_cnt			Number of NUMA nodes in system for
 *					non-DR-capable systems, maximum
 *					possible number of NUMA nodes in
 *					system for DR-capable systems
 *
 * - lgrp_plat_node_domain[]		Node ID to proximity domain ID mapping
 *					table indexed by node ID (only used
 *					for SRAT)
 *
 * - lgrp_plat_memnode_info[]		Table with physical address range for
 *					each memory node indexed by memory node
 *					ID
 *
 * The code is implemented to make the following always be true:
 *
 *	lgroup platform handle == node ID == memnode ID
 *
 * Moreover, it allows for the proximity domain ID to be equal to all of the
 * above as long as the proximity domain IDs are numbered from 0 to <number of
 * nodes - 1>.  This is done by hashing each proximity domain ID into the range
 * from 0 to <number of nodes - 1>.  Then proximity domain ID N will hash into
 * node ID N and proximity domain ID N will be entered into
 * lgrp_plat_node_domain[N] and be assigned node ID N.  If the proximity domain
 * IDs aren't numbered from 0 to <number of nodes - 1>, then hashing the
 * proximity domain IDs into lgrp_plat_node_domain[] will still work for
 * assigning proximity domain IDs to node IDs.  However, the proximity domain
 * IDs may not map to the equivalent node ID since we want to keep the node IDs
 * numbered from 0 to <number of nodes - 1> to minimize cost of searching and
 * potentially space.
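 *
 * As an illustrative example (not part of the original comment): on a
 * hypothetical 4-node machine whose firmware numbers proximity domains 4, 5,
 * 6, and 7, lgrp_plat_prox_domain_min ends up as 4, so the NODE_DOMAIN_HASH()
 * macro defined below maps domain 4 to node 0, domain 5 to node 1, domain 6
 * to node 2, and domain 7 to node 3, keeping node IDs numbered from 0 to
 * <number of nodes - 1>.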
 *
 * With the introduction of support of memory DR operations on x86 platforms,
 * things get a little complicated.  The addresses of hot-added memory may not
 * be contiguous with other memory connected to the same lgrp node.  In other
 * words, memory addresses may get interleaved among lgrp nodes after memory
 * DR operations.  To work around this limitation, we have extended the
 * relationship between lgrp node and memory node from a 1:1 map to a 1:N map;
 * that is, there may be multiple memory nodes associated with an lgrp node
 * after memory DR operations.
 *
 * To minimize the code changes needed to support memory DR operations, the
 * following policies have been adopted (an illustrative sketch of the
 * resulting memnode ID layout follows this comment block):
 * 1) On non-DR-capable systems, the relationship among lgroup platform handle,
 *    node ID and memnode ID is still kept as:
 *	lgroup platform handle == node ID == memnode ID
 * 2) For memory present at boot time on DR-capable platforms, the relationship
 *    is still kept as is:
 *	lgroup platform handle == node ID == memnode ID
 * 3) For hot-added memory, the relationship between lgrp ID and memnode ID has
 *    been changed from a 1:1 map to a 1:N map.  Memnode IDs
 *    [0 - lgrp_plat_node_cnt) are reserved for memory present at boot time,
 *    and memnode IDs [lgrp_plat_node_cnt, max_mem_nodes) are used to
 *    dynamically allocate memnode IDs for hot-added memory.
 * 4) All boot code with the assumption "node ID == memnode ID" can live as
 *    is because node ID is always equal to memnode ID at boot time.
 * 5) The lgrp_plat_memnode_info_update(), plat_pfn_to_mem_node() and
 *    lgrp_plat_mem_size() related logic has been enhanced to deal with
 *    the 1:N map relationship.
 * 6) The latency probing related logic, which assumes
 *    "node ID == memnode ID" and may be called at run time, is disabled if
 *    memory DR operation is enabled.
 */
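
/*
 * Illustrative sketch (not from the original source): memnode ID layout on a
 * hypothetical DR-capable system with lgrp_plat_node_cnt == 2 and
 * max_mem_nodes == 8, under policy 3) above:
 *
 *	memnode IDs 0-1:  boot-time memory for lgrp nodes 0 and 1
 *	memnode IDs 2-7:  dynamically allocated for hot-added memory; each
 *			  records its owning lgrp node in
 *			  lgrp_plat_memnode_info[mnode].lgrphand, which is
 *			  what plat_mem_node_to_lgrphand() hands back.
 */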

#include <sys/archsystm.h>	/* for {in,out}{b,w,l}() */
#include <sys/atomic.h>
#include <sys/bootconf.h>
#include <sys/cmn_err.h>
#include <sys/controlregs.h>
#include <sys/cpupart.h>
#include <sys/cpuvar.h>
#include <sys/lgrp.h>
#include <sys/machsystm.h>
#include <sys/memlist.h>
#include <sys/memnode.h>
#include <sys/mman.h>
#include <sys/note.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>
#include <sys/param.h>
#include <sys/pghw.h>
#include <sys/promif.h>		/* for prom_printf() */
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/x86_archext.h>
#include <vm/hat_i86.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>

#include <sys/acpidev.h>
#include <sys/acpi/acpi.h>	/* for SRAT, SLIT and MSCT */

extern ACPI_TABLE_SRAT	*srat_ptr;
extern ACPI_TABLE_SLIT	*slit_ptr;
extern ACPI_TABLE_MSCT	*msct_ptr;

#define	NLGRP	(MAX_NODES * (MAX_NODES - 1) + 1)
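
/*
 * For example, with MAX_NODES == 8, NLGRP == 8 * 7 + 1 == 57: one lgroup for
 * each ordered pair of distinct nodes plus one for the root, which matches
 * the estimate lgrp_plat_max_lgrps() returns before the topology is known.
 */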

/*
 * Constants for configuring probing
 */
#define	LGRP_PLAT_PROBE_NROUNDS		64	/* default laps for probing */
#define	LGRP_PLAT_PROBE_NSAMPLES	1	/* default samples to take */
#define	LGRP_PLAT_PROBE_NREADS		256	/* number of vendor ID reads */

/*
 * Flags for probing
 */
#define	LGRP_PLAT_PROBE_ENABLE		0x1	/* enable probing */
#define	LGRP_PLAT_PROBE_PGCPY		0x2	/* probe using page copy */
#define	LGRP_PLAT_PROBE_VENDOR		0x4	/* probe vendor ID register */

/*
 * Hash proximity domain ID into node to domain mapping table "mod" number of
 * nodes to minimize span of entries used and try to have lowest numbered
 * proximity domain be node 0
 */
#define	NODE_DOMAIN_HASH(domain, node_cnt) \
	((lgrp_plat_prox_domain_min == UINT32_MAX) ? (domain) % node_cnt : \
	    ((domain) - lgrp_plat_prox_domain_min) % node_cnt)

/*
 * CPU to node ID mapping structure (only used with SRAT)
 */
typedef struct cpu_node_map {
	int		exists;
	uint_t		node;
	uint32_t	apicid;
	uint32_t	prox_domain;
} cpu_node_map_t;

typedef struct lgrp_plat_latency_stats {
	hrtime_t	latencies[MAX_NODES][MAX_NODES];
	hrtime_t	latency_max;
	hrtime_t	latency_min;
} lgrp_plat_latency_stats_t;

/*
 * Memory configuration for probing
 */
typedef struct lgrp_plat_probe_mem_config {
	size_t	probe_memsize;		/* how much memory to probe per node */
	caddr_t	probe_va[MAX_NODES];	/* where memory mapped for probing */
	pfn_t	probe_pfn[MAX_NODES];	/* physical pages to map for probing */
} lgrp_plat_probe_mem_config_t;

/*
 * Statistics kept for probing
 */
typedef struct lgrp_plat_probe_stats {
	hrtime_t	flush_cost;
	hrtime_t	probe_cost;
	hrtime_t	probe_cost_total;
	hrtime_t	probe_error_code;
	hrtime_t	probe_errors[MAX_NODES][MAX_NODES];
	int		probe_suspect[MAX_NODES][MAX_NODES];
	hrtime_t	probe_max[MAX_NODES][MAX_NODES];
	hrtime_t	probe_min[MAX_NODES][MAX_NODES];
} lgrp_plat_probe_stats_t;

/*
 * Node to proximity domain ID mapping structure (only used with SRAT)
 */
typedef struct node_domain_map {
	int		exists;
	uint32_t	prox_domain;
} node_domain_map_t;

/*
 * Node ID and starting and ending page for physical memory in memory node
 */
typedef struct memnode_phys_addr_map {
	pfn_t		start;
	pfn_t		end;
	int		exists;
	uint32_t	prox_domain;
	uint32_t	device_id;
	lgrp_handle_t	lgrphand;
} memnode_phys_addr_map_t;

/*
 * Number of CPUs for which we got APIC IDs
 */
static int				lgrp_plat_apic_ncpus = 0;

/*
 * CPU to node ID mapping table (only used for SRAT) and its max number of
 * entries
 */
static cpu_node_map_t			*lgrp_plat_cpu_node = NULL;
static uint_t				lgrp_plat_cpu_node_nentries = 0;

/*
 * Latency statistics
 */
lgrp_plat_latency_stats_t		lgrp_plat_lat_stats;

/*
 * Whether memory is interleaved across nodes causing MPO to be disabled
 */
static int				lgrp_plat_mem_intrlv = 0;

/*
 * Node ID to proximity domain ID mapping table (only used for SRAT)
 */
static node_domain_map_t		lgrp_plat_node_domain[MAX_NODES];

/*
 * Physical address range for memory in each node
 */
static memnode_phys_addr_map_t		lgrp_plat_memnode_info[MAX_MEM_NODES];

/*
 * Statistics gotten from probing
 */
static lgrp_plat_probe_stats_t		lgrp_plat_probe_stats;

/*
 * Memory configuration for probing
 */
static lgrp_plat_probe_mem_config_t	lgrp_plat_probe_mem_config;

/*
 * Lowest proximity domain ID seen in ACPI SRAT
 */
static uint32_t				lgrp_plat_prox_domain_min = UINT32_MAX;

/*
 * Error code from processing ACPI SRAT
 */
static int				lgrp_plat_srat_error = 0;

/*
 * Error code from processing ACPI SLIT
 */
static int				lgrp_plat_slit_error = 0;

/*
 * Whether lgrp topology has been flattened to 2 levels.
 */
static int				lgrp_plat_topo_flatten = 0;

/*
 * Maximum memory node ID in use.
 */
static uint_t				lgrp_plat_max_mem_node;

/*
 * Allocate lgroup array statically
 */
static lgrp_t				lgrp_space[NLGRP];
static int				nlgrps_alloc;

/*
 * Enable finding and using minimum proximity domain ID when hashing
 */
int			lgrp_plat_domain_min_enable = 1;

/*
 * Maximum possible number of nodes in system
 */
uint_t			lgrp_plat_node_cnt = 1;

/*
 * Enable sorting nodes in ascending order by starting physical address
 */
int			lgrp_plat_node_sort_enable = 1;

/*
 * Configuration Parameters for Probing
 * - lgrp_plat_probe_flags	Flags to specify enabling probing, probe
 *				operation, etc.
 * - lgrp_plat_probe_nrounds	How many rounds of probing to do
 * - lgrp_plat_probe_nsamples	Number of samples to take when probing each
 *				node
 * - lgrp_plat_probe_nreads	Number of times to read vendor ID from
 *				Northbridge for each probe
 */
uint_t			lgrp_plat_probe_flags = 0;
int			lgrp_plat_probe_nrounds = LGRP_PLAT_PROBE_NROUNDS;
int			lgrp_plat_probe_nsamples = LGRP_PLAT_PROBE_NSAMPLES;
int			lgrp_plat_probe_nreads = LGRP_PLAT_PROBE_NREADS;

/*
 * Enable use of ACPI System Resource Affinity Table (SRAT), System
 * Locality Information Table (SLIT) and Maximum System Capability Table (MSCT)
 */
int			lgrp_plat_srat_enable = 1;
int			lgrp_plat_slit_enable = 1;
int			lgrp_plat_msct_enable = 1;

/*
 * mnode_xwa: set to a non-zero value to initiate workaround if large pages are
 * found to be crossing memory node boundaries. The workaround will eliminate
 * a base size page at the end of each memory node boundary to ensure that
 * a large page with constituent pages that span more than 1 memory node
 * can never be formed.
 */
int	mnode_xwa = 1;

/*
 * Static array to hold lgroup statistics
 */
struct lgrp_stats	lgrp_stats[NLGRP];

/*
 * Forward declarations of platform interface routines
 */
void		plat_build_mem_nodes(struct memlist *list);

int		plat_mnode_xcheck(pfn_t pfncnt);

lgrp_handle_t	plat_mem_node_to_lgrphand(int mnode);

int		plat_pfn_to_mem_node(pfn_t pfn);

/*
 * Forward declarations of lgroup platform interface routines
 */
lgrp_t		*lgrp_plat_alloc(lgrp_id_t lgrpid);

void		lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg);

lgrp_handle_t	lgrp_plat_cpu_to_hand(processorid_t id);

void		lgrp_plat_init(lgrp_init_stages_t stage);

int		lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to);

int		lgrp_plat_max_lgrps(void);

pgcnt_t		lgrp_plat_mem_size(lgrp_handle_t plathand,
    lgrp_mem_query_t query);

lgrp_handle_t	lgrp_plat_pfn_to_hand(pfn_t pfn);

void		lgrp_plat_probe(void);

lgrp_handle_t	lgrp_plat_root_hand(void);

/*
 * Forward declarations of local routines
 */
static int	is_opteron(void);

static int	lgrp_plat_cpu_node_update(node_domain_map_t *node_domain,
    int node_cnt, cpu_node_map_t *cpu_node, int nentries, uint32_t apicid,
    uint32_t domain);

static int	lgrp_plat_cpu_to_node(cpu_t *cp, cpu_node_map_t *cpu_node,
    int cpu_node_nentries);

static int	lgrp_plat_domain_to_node(node_domain_map_t *node_domain,
    int node_cnt, uint32_t domain);

static void	lgrp_plat_get_numa_config(void);

static void	lgrp_plat_latency_adjust(memnode_phys_addr_map_t *memnode_info,
    lgrp_plat_latency_stats_t *lat_stats,
    lgrp_plat_probe_stats_t *probe_stats);

static int	lgrp_plat_latency_verify(memnode_phys_addr_map_t *memnode_info,
    lgrp_plat_latency_stats_t *lat_stats);

static void	lgrp_plat_main_init(void);

static pgcnt_t	lgrp_plat_mem_size_default(lgrp_handle_t, lgrp_mem_query_t);

static int	lgrp_plat_node_domain_update(node_domain_map_t *node_domain,
    int node_cnt, uint32_t domain);

static int	lgrp_plat_memnode_info_update(node_domain_map_t *node_domain,
    int node_cnt, memnode_phys_addr_map_t *memnode_info, int memnode_cnt,
    uint64_t start, uint64_t end, uint32_t domain, uint32_t device_id);

static void	lgrp_plat_node_sort(node_domain_map_t *node_domain,
    int node_cnt, cpu_node_map_t *cpu_node, int cpu_count,
    memnode_phys_addr_map_t *memnode_info);

static hrtime_t	lgrp_plat_probe_time(int to, cpu_node_map_t *cpu_node,
    int cpu_node_nentries, lgrp_plat_probe_mem_config_t *probe_mem_config,
    lgrp_plat_latency_stats_t *lat_stats,
    lgrp_plat_probe_stats_t *probe_stats);

static int	lgrp_plat_process_cpu_apicids(cpu_node_map_t *cpu_node);

static int	lgrp_plat_process_slit(ACPI_TABLE_SLIT *tp,
    node_domain_map_t *node_domain, uint_t node_cnt,
    memnode_phys_addr_map_t *memnode_info,
    lgrp_plat_latency_stats_t *lat_stats);

static int	lgrp_plat_process_sli(uint32_t domain, uchar_t *sli_info,
    uint32_t sli_cnt, node_domain_map_t *node_domain, uint_t node_cnt,
    lgrp_plat_latency_stats_t *lat_stats);

static int	lgrp_plat_process_srat(ACPI_TABLE_SRAT *tp, ACPI_TABLE_MSCT *mp,
    uint32_t *prox_domain_min, node_domain_map_t *node_domain,
    cpu_node_map_t *cpu_node, int cpu_count,
    memnode_phys_addr_map_t *memnode_info);

static void	lgrp_plat_release_bootstrap(void);

static int	lgrp_plat_srat_domains(ACPI_TABLE_SRAT *tp,
    uint32_t *prox_domain_min);

static int	lgrp_plat_msct_domains(ACPI_TABLE_MSCT *tp,
    uint32_t *prox_domain_min);

static void	lgrp_plat_2level_setup(lgrp_plat_latency_stats_t *lat_stats);

static void	opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv,
    memnode_phys_addr_map_t *memnode_info);

static hrtime_t	opt_probe_vendor(int dest_node, int nreads);

/*
 * PLATFORM INTERFACE ROUTINES
 */

/*
 * Configure memory nodes for machines with more than one node (ie NUMA)
 */
void
plat_build_mem_nodes(struct memlist *list)
{
	pfn_t		cur_start;	/* start addr of subrange */
	pfn_t		cur_end;	/* end addr of subrange */
	pfn_t		start;		/* start addr of whole range */
	pfn_t		end;		/* end addr of whole range */
	pgcnt_t		endcnt;		/* pages to sacrifice */

	/*
	 * Boot install lists are arranged <addr, len>, ...
	 */
	while (list) {
		int	node;

		start = list->ml_address >> PAGESHIFT;
		end = (list->ml_address + list->ml_size - 1) >> PAGESHIFT;

		if (start > physmax) {
			list = list->ml_next;
			continue;
		}
		if (end > physmax)
			end = physmax;

		/*
		 * When there is only one memnode, just add memory to memnode
		 */
		if (max_mem_nodes == 1) {
			mem_node_add_slice(start, end);
			list = list->ml_next;
			continue;
		}

		/*
		 * mem_node_add_slice() expects to get a memory range that
		 * is within one memnode, so need to split any memory range
		 * that spans multiple memnodes into subranges that are each
		 * contained within one memnode when feeding them to
		 * mem_node_add_slice()
		 */
		cur_start = start;
		do {
			node = plat_pfn_to_mem_node(cur_start);

			/*
			 * Panic if DRAM address map registers or SRAT say
			 * memory in node doesn't exist or address from
			 * boot installed memory list entry isn't in this node.
			 * This shouldn't happen and rest of code can't deal
			 * with this if it does.
			 */
			if (node < 0 || node >= lgrp_plat_max_mem_node ||
			    !lgrp_plat_memnode_info[node].exists ||
			    cur_start < lgrp_plat_memnode_info[node].start ||
			    cur_start > lgrp_plat_memnode_info[node].end) {
				cmn_err(CE_PANIC, "Don't know which memnode "
				    "to add installed memory address 0x%lx\n",
				    cur_start);
			}

			/*
			 * End of current subrange should not span memnodes
			 */
			cur_end = end;
			endcnt = 0;
			if (lgrp_plat_memnode_info[node].exists &&
			    cur_end > lgrp_plat_memnode_info[node].end) {
				cur_end = lgrp_plat_memnode_info[node].end;
				if (mnode_xwa > 1) {
					/*
					 * sacrifice the last page in each
					 * node to eliminate large pages
					 * that span more than 1 memory node.
					 */
					endcnt = 1;
				}
			}

			mem_node_add_slice(cur_start, cur_end - endcnt);

			/*
			 * Next subrange starts after end of current one
			 */
			cur_start = cur_end + 1;
		} while (cur_end < end);

		list = list->ml_next;
	}
	mem_node_physalign = 0;
	mem_node_pfn_shift = 0;
}
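
/*
 * Worked example (illustrative only, not from the original source): if a boot
 * install list entry spans PFNs 0x0-0x1ffff but memnode 0 ends at PFN 0xffff
 * and memnode 1 starts at PFN 0x10000, the loop above feeds
 * mem_node_add_slice() two subranges, 0x0-0xffff and 0x10000-0x1ffff (minus a
 * sacrificed trailing page per node when the mnode_xwa workaround is active),
 * so that no slice ever crosses a memnode boundary.
 */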

/*
 * plat_mnode_xcheck: checks the node memory ranges to see if there is a pfncnt
 * range of pages aligned on pfncnt that crosses a node boundary. Returns 1 if
 * a crossing is found and returns 0 otherwise.
 */
int
plat_mnode_xcheck(pfn_t pfncnt)
{
	int	node, prevnode = -1, basenode;
	pfn_t	ea, sa;

	for (node = 0; node < lgrp_plat_max_mem_node; node++) {

		if (lgrp_plat_memnode_info[node].exists == 0)
			continue;

		if (prevnode == -1) {
			prevnode = node;
			basenode = node;
			continue;
		}

		/* assume x86 node pfn ranges are in increasing order */
		ASSERT(lgrp_plat_memnode_info[node].start >
		    lgrp_plat_memnode_info[prevnode].end);

		/*
		 * continue if the starting address of node is not contiguous
		 * with the previous node.
		 */
		if (lgrp_plat_memnode_info[node].start !=
		    (lgrp_plat_memnode_info[prevnode].end + 1)) {
			basenode = node;
			prevnode = node;
			continue;
		}

		/* check if the starting address of node is pfncnt aligned */
		if ((lgrp_plat_memnode_info[node].start & (pfncnt - 1)) != 0) {

			/*
			 * at this point, node starts at an unaligned boundary
			 * and is contiguous with the previous node(s) to
			 * basenode. Check if there is an aligned contiguous
			 * range of length pfncnt that crosses this boundary.
			 */
			sa = P2ALIGN(lgrp_plat_memnode_info[prevnode].end,
			    pfncnt);
			ea = P2ROUNDUP((lgrp_plat_memnode_info[node].start),
			    pfncnt);

			ASSERT((ea - sa) == pfncnt);
			if (sa >= lgrp_plat_memnode_info[basenode].start &&
			    ea <= (lgrp_plat_memnode_info[node].end + 1)) {
				/*
				 * large page found to cross mnode boundary.
				 * Return Failure if workaround not enabled.
				 */
				if (mnode_xwa == 0)
					return (1);
				mnode_xwa++;
			}
		}
		prevnode = node;
	}
	return (0);
}

lgrp_handle_t
plat_mem_node_to_lgrphand(int mnode)
{
	if (max_mem_nodes == 1)
		return (LGRP_DEFAULT_HANDLE);

	ASSERT(0 <= mnode && mnode < lgrp_plat_max_mem_node);

	return ((lgrp_handle_t)(lgrp_plat_memnode_info[mnode].lgrphand));
}

int
plat_pfn_to_mem_node(pfn_t pfn)
{
	int	node;

	if (max_mem_nodes == 1)
		return (0);

	for (node = 0; node < lgrp_plat_max_mem_node; node++) {
		/*
		 * Skip nodes with no memory
		 */
		if (!lgrp_plat_memnode_info[node].exists)
			continue;

		if (pfn >= lgrp_plat_memnode_info[node].start &&
		    pfn <= lgrp_plat_memnode_info[node].end)
			return (node);
	}

	/*
	 * Didn't find memnode where this PFN lives which should never happen
	 */
	ASSERT(node < lgrp_plat_max_mem_node);
	return (-1);
}

/*
 * LGROUP PLATFORM INTERFACE ROUTINES
 */

/*
 * Allocate additional space for an lgroup.
 */
lgrp_t *
lgrp_plat_alloc(lgrp_id_t lgrpid)
{
	lgrp_t	*lgrp;

	lgrp = &lgrp_space[nlgrps_alloc++];
	if (lgrpid >= NLGRP || nlgrps_alloc > NLGRP)
		return (NULL);
	return (lgrp);
}

/*
 * Platform handling for (re)configuration changes
 *
 * Mechanism to protect lgrp_plat_cpu_node[] at CPU hotplug:
 * 1) Use cpu_lock to synchronize between lgrp_plat_config() and
 *    lgrp_plat_cpu_to_hand().
 * 2) Disable latency probing logic by making sure that the flag
 *    LGRP_PLAT_PROBE_ENABLE is cleared.
 *
 * Mechanism to protect lgrp_plat_memnode_info[] at memory hotplug:
 * 1) Only insertions into lgrp_plat_memnode_info at memory hotplug, no
 *    removal.
 * 2) Only expansion of existing entries, no shrinking.
 * 3) On the writing side, the DR framework ensures that lgrp_plat_config() is
 *    called in single-threaded context, and membar_producer() is used to
 *    ensure that all changes are visible to other CPUs before setting the
 *    "exists" flag.
 * 4) On the reading side, membar_consumer() after checking the "exists" flag
 *    ensures that the right values are retrieved.
 *
 * Mechanism to protect lgrp_plat_node_domain[] at hotplug:
 * 1) Only insertion into lgrp_plat_node_domain at hotplug, no removal.
 * 2) On the writing side, it's single-threaded and membar_producer() is used
 *    to ensure all changes are visible to other CPUs before setting the
 *    "exists" flag.
 * 3) On the reading side, membar_consumer() after checking the "exists" flag
 *    ensures that the right values are retrieved.
 */
void
lgrp_plat_config(lgrp_config_flag_t flag, uintptr_t arg)
{
	int	rc, node;
	cpu_t	*cp;
	void	*hdl = NULL;
	uchar_t	*sliptr = NULL;
	uint32_t domain, apicid, slicnt = 0;
	update_membounds_t *mp;

	extern int acpidev_dr_get_cpu_numa_info(cpu_t *, void **, uint32_t *,
	    uint32_t *, uint32_t *, uchar_t **);
	extern void acpidev_dr_free_cpu_numa_info(void *);

	/*
	 * This interface is used to support CPU/memory DR operations.
	 * Don't bother here if it's still during boot or only one lgrp node
	 * is supported.
	 */
	if (!lgrp_topo_initialized || lgrp_plat_node_cnt == 1)
		return;

	switch (flag) {
	case LGRP_CONFIG_CPU_ADD:
		cp = (cpu_t *)arg;
		ASSERT(cp != NULL);
		ASSERT(MUTEX_HELD(&cpu_lock));

		/* Check whether CPU already exists. */
		ASSERT(!lgrp_plat_cpu_node[cp->cpu_id].exists);
		if (lgrp_plat_cpu_node[cp->cpu_id].exists) {
			cmn_err(CE_WARN,
			    "!lgrp: CPU(%d) already exists in cpu_node map.",
			    cp->cpu_id);
			break;
		}

		/* Query CPU lgrp information. */
		rc = acpidev_dr_get_cpu_numa_info(cp, &hdl, &apicid, &domain,
		    &slicnt, &sliptr);
		if (rc != 0) {
			cmn_err(CE_WARN,
			    "!lgrp: failed to query lgrp info for CPU(%d).",
			    cp->cpu_id);
			break;
		}

		/* Update node to proximity domain mapping */
		node = lgrp_plat_domain_to_node(lgrp_plat_node_domain,
		    lgrp_plat_node_cnt, domain);
		if (node == -1) {
			node = lgrp_plat_node_domain_update(
			    lgrp_plat_node_domain, lgrp_plat_node_cnt, domain);
			if (node == -1) {
				acpidev_dr_free_cpu_numa_info(hdl);
				cmn_err(CE_WARN, "!lgrp: failed to update "
				    "node_domain map for domain(%u).", domain);
				break;
			}
		}

		/* Update latency information among lgrps. */
		if (slicnt != 0 && sliptr != NULL) {
			if (lgrp_plat_process_sli(domain, sliptr, slicnt,
			    lgrp_plat_node_domain, lgrp_plat_node_cnt,
			    &lgrp_plat_lat_stats) != 0) {
				cmn_err(CE_WARN, "!lgrp: failed to update "
				    "latency information for domain (%u).",
				    domain);
			}
		}

		/* Update CPU to node mapping. */
		lgrp_plat_cpu_node[cp->cpu_id].prox_domain = domain;
		lgrp_plat_cpu_node[cp->cpu_id].node = node;
		lgrp_plat_cpu_node[cp->cpu_id].apicid = apicid;
		lgrp_plat_cpu_node[cp->cpu_id].exists = 1;
		lgrp_plat_apic_ncpus++;

		acpidev_dr_free_cpu_numa_info(hdl);
		break;

	case LGRP_CONFIG_CPU_DEL:
		cp = (cpu_t *)arg;
		ASSERT(cp != NULL);
		ASSERT(MUTEX_HELD(&cpu_lock));

		/* Check whether CPU exists. */
		ASSERT(lgrp_plat_cpu_node[cp->cpu_id].exists);
		if (!lgrp_plat_cpu_node[cp->cpu_id].exists) {
			cmn_err(CE_WARN,
			    "!lgrp: CPU(%d) doesn't exist in cpu_node map.",
			    cp->cpu_id);
			break;
		}

		/* Query CPU lgrp information. */
		rc = acpidev_dr_get_cpu_numa_info(cp, &hdl, &apicid, &domain,
		    NULL, NULL);
		if (rc != 0) {
			cmn_err(CE_WARN,
			    "!lgrp: failed to query lgrp info for CPU(%d).",
			    cp->cpu_id);
			break;
		}

		/* Update CPU to node mapping. */
		ASSERT(lgrp_plat_cpu_node[cp->cpu_id].apicid == apicid);
		ASSERT(lgrp_plat_cpu_node[cp->cpu_id].prox_domain == domain);
		lgrp_plat_cpu_node[cp->cpu_id].exists = 0;
		lgrp_plat_cpu_node[cp->cpu_id].apicid = UINT32_MAX;
		lgrp_plat_cpu_node[cp->cpu_id].prox_domain = UINT32_MAX;
		lgrp_plat_cpu_node[cp->cpu_id].node = UINT_MAX;
		lgrp_plat_apic_ncpus--;

		acpidev_dr_free_cpu_numa_info(hdl);
		break;
:
900 mp
= (update_membounds_t
*)arg
;
903 /* Update latency information among lgrps. */
904 if (mp
->u_sli_cnt
!= 0 && mp
->u_sli_ptr
!= NULL
) {
905 if (lgrp_plat_process_sli(mp
->u_domain
,
906 mp
->u_sli_ptr
, mp
->u_sli_cnt
,
907 lgrp_plat_node_domain
, lgrp_plat_node_cnt
,
908 &lgrp_plat_lat_stats
) != 0) {
909 cmn_err(CE_WARN
, "!lgrp: failed to update "
910 "latency information for domain (%u).",
915 if (lgrp_plat_memnode_info_update(lgrp_plat_node_domain
,
916 lgrp_plat_node_cnt
, lgrp_plat_memnode_info
, max_mem_nodes
,
917 mp
->u_base
, mp
->u_base
+ mp
->u_length
,
918 mp
->u_domain
, mp
->u_device_id
) < 0) {
920 "!lgrp: failed to update latency information for "
921 "memory (0x%" PRIx64
" - 0x%" PRIx64
").",
922 mp
->u_base
, mp
->u_base
+ mp
->u_length
);

/*
 * Return the platform handle for the lgroup containing the given CPU
 */
lgrp_handle_t
lgrp_plat_cpu_to_hand(processorid_t id)
{
	lgrp_handle_t	hand;

	ASSERT(!lgrp_initialized || MUTEX_HELD(&cpu_lock));

	if (lgrp_plat_node_cnt == 1)
		return (LGRP_DEFAULT_HANDLE);

	hand = (lgrp_handle_t)lgrp_plat_cpu_to_node(cpu[id],
	    lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries);

	ASSERT(hand != (lgrp_handle_t)-1);
	if (hand == (lgrp_handle_t)-1)
		return (LGRP_NULL_HANDLE);

	return (hand);
}

/*
 * Platform-specific initialization of lgroups
 */
void
lgrp_plat_init(lgrp_init_stages_t stage)
{
	u_longlong_t	value;

	switch (stage) {
	case LGRP_INIT_STAGE1:
		/*
		 * Get boot property for lgroup topology height limit
		 */
		if (bootprop_getval(BP_LGRP_TOPO_LEVELS, &value) == 0)
			(void) lgrp_topo_ht_limit_set((int)value);

		/*
		 * Get boot property for enabling/disabling SRAT
		 */
		if (bootprop_getval(BP_LGRP_SRAT_ENABLE, &value) == 0)
			lgrp_plat_srat_enable = (int)value;

		/*
		 * Get boot property for enabling/disabling SLIT
		 */
		if (bootprop_getval(BP_LGRP_SLIT_ENABLE, &value) == 0)
			lgrp_plat_slit_enable = (int)value;

		/*
		 * Get boot property for enabling/disabling MSCT
		 */
		if (bootprop_getval(BP_LGRP_MSCT_ENABLE, &value) == 0)
			lgrp_plat_msct_enable = (int)value;

		/*
		 * Initialize as a UMA machine
		 */
		if (lgrp_topo_ht_limit() == 1) {
			lgrp_plat_node_cnt = max_mem_nodes = 1;
			lgrp_plat_max_mem_node = 1;
			return;
		}

		lgrp_plat_get_numa_config();

		/*
		 * Each lgrp node needs MAX_MEM_NODES_PER_LGROUP memnodes
		 * to support memory DR operations if memory DR is enabled.
		 */
		lgrp_plat_max_mem_node = lgrp_plat_node_cnt;
		if (plat_dr_support_memory() && lgrp_plat_node_cnt != 1) {
			max_mem_nodes = MAX_MEM_NODES_PER_LGROUP *
			    lgrp_plat_node_cnt;
			ASSERT(max_mem_nodes <= MAX_MEM_NODES);
		}
		break;

	case LGRP_INIT_STAGE3:
		lgrp_plat_probe();
		lgrp_plat_release_bootstrap();
		break;

	case LGRP_INIT_STAGE4:
		lgrp_plat_main_init();
		break;

	default:
		break;
	}
}
1032 * This latency number can only be used for relative comparison
1033 * between lgroups on the running system, cannot be used across platforms,
1034 * and may not reflect the actual latency. It is platform and implementation
1035 * specific, so platform gets to decide its value. It would be nice if the
1036 * number was at least proportional to make comparisons more meaningful though.
1039 lgrp_plat_latency(lgrp_handle_t from
, lgrp_handle_t to
)
1041 lgrp_handle_t src
, dest
;
1044 if (max_mem_nodes
== 1)
1048 * Return max latency for root lgroup
1050 if (from
== LGRP_DEFAULT_HANDLE
|| to
== LGRP_DEFAULT_HANDLE
)
1051 return (lgrp_plat_lat_stats
.latency_max
);
1057 * Return 0 for nodes (lgroup platform handles) out of range
1059 if (src
< 0 || src
>= MAX_NODES
|| dest
< 0 || dest
>= MAX_NODES
)
1063 * Probe from current CPU if its lgroup latencies haven't been set yet
1064 * and we are trying to get latency from current CPU to some node.
1065 * Avoid probing if CPU/memory DR is enabled.
1067 if (lgrp_plat_lat_stats
.latencies
[src
][src
] == 0) {
1069 * Latency information should be updated by lgrp_plat_config()
1070 * for DR operations. Something is wrong if reaches here.
1071 * For safety, flatten lgrp topology to two levels.
1073 if (plat_dr_support_cpu() || plat_dr_support_memory()) {
1074 ASSERT(lgrp_plat_lat_stats
.latencies
[src
][src
]);
1076 "lgrp: failed to get latency information, "
1077 "fall back to two-level topology.");
1078 lgrp_plat_2level_setup(&lgrp_plat_lat_stats
);
1080 node
= lgrp_plat_cpu_to_node(CPU
, lgrp_plat_cpu_node
,
1081 lgrp_plat_cpu_node_nentries
);
1082 ASSERT(node
>= 0 && node
< lgrp_plat_node_cnt
);
1088 return (lgrp_plat_lat_stats
.latencies
[src
][dest
]);

/*
 * Return the maximum number of lgrps supported by the platform.
 * Before lgrp topology is known it returns an estimate based on the number of
 * nodes.  Once topology is known it returns:
 * 1) the actual maximum number of lgrps created if CPU/memory DR operations
 *    are not supported.
 * 2) the maximum possible number of lgrps if CPU/memory DR operations are
 *    supported.
 */
int
lgrp_plat_max_lgrps(void)
{
	if (!lgrp_topo_initialized || plat_dr_support_cpu() ||
	    plat_dr_support_memory()) {
		return (lgrp_plat_node_cnt * (lgrp_plat_node_cnt - 1) + 1);
	} else {
		return (lgrp_alloc_max + 1);
	}
}

/*
 * Count number of memory pages (_t) based on mnode id (_n) and query type (_q).
 */
#define	_LGRP_PLAT_MEM_SIZE(_n, _q, _t)					\
	if (mem_node_config[_n].exists) {				\
		switch (_q) {						\
		case LGRP_MEM_SIZE_FREE:				\
			_t += MNODE_PGCNT(_n);				\
			break;						\
		case LGRP_MEM_SIZE_AVAIL:				\
			_t += mem_node_memlist_pages(_n, phys_avail);	\
			break;						\
		case LGRP_MEM_SIZE_INSTALL:				\
			_t += mem_node_memlist_pages(_n, phys_install);	\
			break;						\
		default:						\
			break;						\
		}							\
	}
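
/*
 * Illustrative use of the macro above (hypothetical values): with memnode 0
 * present,
 *
 *	pgcnt_t npgs = 0;
 *	_LGRP_PLAT_MEM_SIZE(0, LGRP_MEM_SIZE_FREE, npgs);
 *
 * leaves npgs equal to MNODE_PGCNT(0), the count of free pages in memnode 0.
 */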

/*
 * Return the number of free pages in an lgroup.
 *
 * For query of LGRP_MEM_SIZE_FREE, return the number of base pagesize
 * pages on freelists.  For query of LGRP_MEM_SIZE_AVAIL, return the
 * number of allocatable base pagesize pages corresponding to the
 * lgroup (e.g. do not include page_t's, BOP_ALLOC()'ed memory, ..)
 * For query of LGRP_MEM_SIZE_INSTALL, return the amount of physical
 * memory installed, regardless of whether or not it's usable.
 */
pgcnt_t
lgrp_plat_mem_size(lgrp_handle_t plathand, lgrp_mem_query_t query)
{
	int	mnode;
	pgcnt_t npgs = (pgcnt_t)0;
	extern struct memlist *phys_avail;
	extern struct memlist *phys_install;

	if (plathand == LGRP_DEFAULT_HANDLE)
		return (lgrp_plat_mem_size_default(plathand, query));

	if (plathand != LGRP_NULL_HANDLE) {
		/* Count memory node present at boot. */
		mnode = (int)plathand;
		ASSERT(mnode < lgrp_plat_node_cnt);
		_LGRP_PLAT_MEM_SIZE(mnode, query, npgs);

		/* Count possible hot-added memory nodes. */
		for (mnode = lgrp_plat_node_cnt;
		    mnode < lgrp_plat_max_mem_node; mnode++) {
			if (lgrp_plat_memnode_info[mnode].lgrphand == plathand)
				_LGRP_PLAT_MEM_SIZE(mnode, query, npgs);
		}
	}

	return (npgs);
}

/*
 * Return the platform handle of the lgroup that contains the physical memory
 * corresponding to the given page frame number
 */
lgrp_handle_t
lgrp_plat_pfn_to_hand(pfn_t pfn)
{
	int	mnode;

	if (max_mem_nodes == 1)
		return (LGRP_DEFAULT_HANDLE);

	if (pfn > physmax)
		return (LGRP_NULL_HANDLE);

	mnode = plat_pfn_to_mem_node(pfn);
	if (mnode < 0)
		return (LGRP_NULL_HANDLE);

	return (MEM_NODE_2_LGRPHAND(mnode));
}

/*
 * Probe memory in each node from current CPU to determine latency topology
 *
 * The probing code will probe the vendor ID register on the Northbridge of
 * Opteron processors and probe memory for other processors by default.
 *
 * Since probing is inherently error prone, the code takes laps across all the
 * nodes probing from each node to each of the other nodes some number of
 * times.  Furthermore, each node is probed some number of times before moving
 * onto the next one during each lap.  The minimum latency gotten between nodes
 * is kept as the latency between the nodes.
 *
 * After all that, the probe times are adjusted by normalizing values that are
 * close to each other and local latencies are made the same.  Lastly, the
 * latencies are verified to make sure that certain conditions are met (eg.
 * local < remote, latency(a, b) == latency(b, a), etc.).
 *
 * If any of the conditions aren't met, the code will export a NUMA
 * configuration with the local CPUs and memory given by the SRAT or PCI config
 * space registers and one remote memory latency since it can't tell exactly
 * how far each node is from each other.
 */
void
lgrp_plat_probe(void)
{
	int				from;
	int				i;
	lgrp_plat_latency_stats_t	*lat_stats;
	boolean_t			probed;
	hrtime_t			probe_time;
	int				to;

	if (!(lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) ||
	    max_mem_nodes == 1 || lgrp_topo_ht_limit() <= 2)
		return;

	/* SRAT and SLIT should be enabled if DR operations are enabled. */
	if (plat_dr_support_cpu() || plat_dr_support_memory())
		return;

	/*
	 * Determine ID of node containing current CPU
	 */
	from = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
	    lgrp_plat_cpu_node_nentries);
	ASSERT(from >= 0 && from < lgrp_plat_node_cnt);
	if (srat_ptr && lgrp_plat_srat_enable && !lgrp_plat_srat_error)
		ASSERT(lgrp_plat_node_domain[from].exists);

	/*
	 * Don't need to probe if got times already
	 */
	lat_stats = &lgrp_plat_lat_stats;
	if (lat_stats->latencies[from][from] != 0)
		return;

	/*
	 * Read vendor ID in Northbridge or read and write page(s)
	 * in each node from current CPU and remember how long it takes,
	 * so we can build latency topology of machine later.
	 * This should approximate the memory latency between each node.
	 */
	probed = B_FALSE;
	for (i = 0; i < lgrp_plat_probe_nrounds; i++) {
		for (to = 0; to < lgrp_plat_node_cnt; to++) {
			/*
			 * Get probe time and skip over any nodes that can't be
			 * probed yet or don't have memory
			 */
			probe_time = lgrp_plat_probe_time(to,
			    lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries,
			    &lgrp_plat_probe_mem_config, &lgrp_plat_lat_stats,
			    &lgrp_plat_probe_stats);
			if (probe_time == 0)
				continue;

			probed = B_TRUE;

			/*
			 * Keep lowest probe time as latency between nodes
			 */
			if (lat_stats->latencies[from][to] == 0 ||
			    probe_time < lat_stats->latencies[from][to])
				lat_stats->latencies[from][to] = probe_time;

			/*
			 * Update overall minimum and maximum probe times
			 * across all nodes
			 */
			if (probe_time < lat_stats->latency_min ||
			    lat_stats->latency_min == -1)
				lat_stats->latency_min = probe_time;
			if (probe_time > lat_stats->latency_max)
				lat_stats->latency_max = probe_time;
		}
	}

	/*
	 * Bail out if weren't able to probe any nodes from current CPU
	 */
	if (probed == B_FALSE)
		return;

	/*
	 * - Fix up latencies such that local latencies are same,
	 *   latency(i, j) == latency(j, i), etc. (if possible)
	 *
	 * - Verify that latencies look ok
	 *
	 * - Fallback to just optimizing for local and remote if
	 *   latencies didn't look right
	 */
	lgrp_plat_latency_adjust(lgrp_plat_memnode_info, &lgrp_plat_lat_stats,
	    &lgrp_plat_probe_stats);
	lgrp_plat_probe_stats.probe_error_code =
	    lgrp_plat_latency_verify(lgrp_plat_memnode_info,
	    &lgrp_plat_lat_stats);
	if (lgrp_plat_probe_stats.probe_error_code)
		lgrp_plat_2level_setup(&lgrp_plat_lat_stats);
}

/*
 * Return platform handle for root lgroup
 */
lgrp_handle_t
lgrp_plat_root_hand(void)
{
	return (LGRP_DEFAULT_HANDLE);
}

/*
 * LOCAL ROUTINES
 */

/*
 * Update CPU to node mapping for given CPU and proximity domain.
 * Return values:
 *	- zero for success
 *	- positive numbers for warnings
 *	- negative numbers for errors
 */
static int
lgrp_plat_cpu_node_update(node_domain_map_t *node_domain, int node_cnt,
    cpu_node_map_t *cpu_node, int nentries, uint32_t apicid, uint32_t domain)
{
	int	i;
	int	node;

	/*
	 * Get node number for proximity domain
	 */
	node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
	if (node == -1) {
		node = lgrp_plat_node_domain_update(node_domain, node_cnt,
		    domain);
		if (node == -1)
			return (-1);
	}

	/*
	 * Search for entry with given APIC ID and fill in its node and
	 * proximity domain IDs (if they haven't been set already)
	 */
	for (i = 0; i < nentries; i++) {
		/*
		 * Skip nonexistent entries and ones without matching APIC ID
		 */
		if (!cpu_node[i].exists || cpu_node[i].apicid != apicid)
			continue;

		/*
		 * Just return if entry completely and correctly filled in
		 * already
		 */
		if (cpu_node[i].prox_domain == domain &&
		    cpu_node[i].node == node)
			return (1);

		/*
		 * It's invalid to have more than one entry with the same
		 * local APIC ID in SRAT table.
		 */
		if (cpu_node[i].node != UINT_MAX)
			return (-2);

		/*
		 * Fill in node and proximity domain IDs
		 */
		cpu_node[i].prox_domain = domain;
		cpu_node[i].node = node;

		return (0);
	}

	/*
	 * It's possible that an apicid doesn't exist in the cpu_node map when
	 * the user limits the number of CPUs powered on at boot by specifying
	 * the boot_ncpus kernel option.
	 */
	return (2);
}

/*
 * Get node ID for given CPU
 */
static int
lgrp_plat_cpu_to_node(cpu_t *cp, cpu_node_map_t *cpu_node,
    int cpu_node_nentries)
{
	processorid_t	cpuid;

	if (cp == NULL)
		return (-1);

	cpuid = cp->cpu_id;
	if (cpuid < 0 || cpuid >= max_ncpus)
		return (-1);

	/*
	 * SRAT doesn't exist, isn't enabled, or there was an error processing
	 * it, so return node ID for Opteron and -1 otherwise.
	 */
	if (srat_ptr == NULL || !lgrp_plat_srat_enable ||
	    lgrp_plat_srat_error) {
		if (is_opteron())
			return (pg_plat_hw_instance_id(cp, PGHW_PROCNODE));
		return (-1);
	}

	/*
	 * Return -1 when CPU to node ID mapping entry doesn't exist for given
	 * CPU
	 */
	if (cpuid >= cpu_node_nentries || !cpu_node[cpuid].exists)
		return (-1);

	return (cpu_node[cpuid].node);
}

/*
 * Return node number for given proximity domain/system locality
 */
static int
lgrp_plat_domain_to_node(node_domain_map_t *node_domain, int node_cnt,
    uint32_t domain)
{
	uint_t	node;
	uint_t	start;

	/*
	 * Hash proximity domain ID into node to domain mapping table (array),
	 * search for entry with matching proximity domain ID, and return index
	 * of matching entry as node ID.
	 */
	node = start = NODE_DOMAIN_HASH(domain, node_cnt);
	do {
		if (node_domain[node].exists) {
			membar_consumer();
			if (node_domain[node].prox_domain == domain)
				return (node);
		}
		node = (node + 1) % node_cnt;
	} while (node != start);
	return (-1);
}
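
/*
 * Collision handling, by example (illustrative only): with node_cnt == 4,
 * suppose proximity domains 0x10 and 0x14 both hash to slot 0.  The first one
 * claims slot 0 via lgrp_plat_node_domain_update(); the second lands in the
 * next free slot (slot 1), and lgrp_plat_domain_to_node() later finds it by
 * scanning forward from slot 0 until it sees the matching entry or wraps back
 * around to the starting slot.
 */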

/*
 * Get NUMA configuration of machine
 */
static void
lgrp_plat_get_numa_config(void)
{
	uint_t	probe_op;

	/*
	 * Read boot property with CPU to APIC ID mapping table/array to
	 * determine number of CPUs
	 */
	lgrp_plat_apic_ncpus = lgrp_plat_process_cpu_apicids(NULL);

	/*
	 * Determine which CPUs and memory are local to each other and number
	 * of NUMA nodes by reading ACPI System Resource Affinity Table (SRAT)
	 */
	if (lgrp_plat_apic_ncpus > 0) {
		int retval;

		/* Reserve enough resources if CPU DR is enabled. */
		if (plat_dr_support_cpu() && max_ncpus > lgrp_plat_apic_ncpus)
			lgrp_plat_cpu_node_nentries = max_ncpus;
		else
			lgrp_plat_cpu_node_nentries = lgrp_plat_apic_ncpus;

		/*
		 * Temporarily allocate boot memory to use for CPU to node
		 * mapping since kernel memory allocator isn't alive yet
		 */
		lgrp_plat_cpu_node = (cpu_node_map_t *)BOP_ALLOC(bootops,
		    NULL, lgrp_plat_cpu_node_nentries * sizeof (cpu_node_map_t),
		    sizeof (int));

		ASSERT(lgrp_plat_cpu_node != NULL);
		if (lgrp_plat_cpu_node) {
			bzero(lgrp_plat_cpu_node, lgrp_plat_cpu_node_nentries *
			    sizeof (cpu_node_map_t));
		} else {
			lgrp_plat_cpu_node_nentries = 0;
		}

		/*
		 * Fill in CPU to node ID mapping table with APIC ID for each
		 * CPU
		 */
		(void) lgrp_plat_process_cpu_apicids(lgrp_plat_cpu_node);

		retval = lgrp_plat_process_srat(srat_ptr, msct_ptr,
		    &lgrp_plat_prox_domain_min,
		    lgrp_plat_node_domain, lgrp_plat_cpu_node,
		    lgrp_plat_apic_ncpus, lgrp_plat_memnode_info);
		if (retval <= 0) {
			lgrp_plat_srat_error = retval;
			lgrp_plat_node_cnt = 1;
		} else {
			lgrp_plat_srat_error = 0;
			lgrp_plat_node_cnt = retval;
		}
	}

	/*
	 * Try to use PCI config space registers on Opteron if there's an error
	 * processing CPU to APIC ID mapping or SRAT
	 */
	if ((lgrp_plat_apic_ncpus <= 0 || lgrp_plat_srat_error != 0) &&
	    is_opteron())
		opt_get_numa_config(&lgrp_plat_node_cnt, &lgrp_plat_mem_intrlv,
		    lgrp_plat_memnode_info);

	/*
	 * Don't bother to setup system for multiple lgroups and only use one
	 * memory node when memory is interleaved between any nodes or there is
	 * only one NUMA node
	 */
	if (lgrp_plat_mem_intrlv || lgrp_plat_node_cnt == 1) {
		lgrp_plat_node_cnt = max_mem_nodes = 1;
		(void) lgrp_topo_ht_limit_set(1);
		return;
	}

	/*
	 * Leaf lgroups on x86/x64 architectures contain one physical
	 * processor chip. Tune lgrp_expand_proc_thresh and
	 * lgrp_expand_proc_diff so that lgrp_choose() will spread
	 * things out aggressively.
	 */
	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX / 2;
	lgrp_expand_proc_diff = 0;

	/*
	 * There should be one memnode (physical page free list(s)) for
	 * each node if memory DR is disabled.
	 */
	max_mem_nodes = lgrp_plat_node_cnt;

	/*
	 * Initialize min and max latency before reading SLIT or probing
	 */
	lgrp_plat_lat_stats.latency_min = -1;
	lgrp_plat_lat_stats.latency_max = 0;

	/*
	 * Determine how far each NUMA node is from each other by
	 * reading ACPI System Locality Information Table (SLIT) if it
	 * exists
	 */
	lgrp_plat_slit_error = lgrp_plat_process_slit(slit_ptr,
	    lgrp_plat_node_domain, lgrp_plat_node_cnt, lgrp_plat_memnode_info,
	    &lgrp_plat_lat_stats);

	/*
	 * Disable support of CPU/memory DR operations if multiple locality
	 * domains exist in system and either of the following is true.
	 * 1) Failed to process SLIT table.
	 * 2) Latency probing is enabled by user.
	 */
	if (lgrp_plat_node_cnt > 1 &&
	    (plat_dr_support_cpu() || plat_dr_support_memory())) {
		if (!lgrp_plat_slit_enable || lgrp_plat_slit_error != 0 ||
		    !lgrp_plat_srat_enable || lgrp_plat_srat_error != 0 ||
		    lgrp_plat_apic_ncpus <= 0) {
			cmn_err(CE_CONT,
			    "?lgrp: failed to process ACPI SRAT/SLIT table, "
			    "disable support of CPU/memory DR operations.");
			plat_dr_disable_cpu();
			plat_dr_disable_memory();
		} else if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) {
			cmn_err(CE_CONT,
			    "?lgrp: latency probing enabled by user, "
			    "disable support of CPU/memory DR operations.");
			plat_dr_disable_cpu();
			plat_dr_disable_memory();
		}
	}

	/* Done if succeeded to process SLIT table. */
	if (lgrp_plat_slit_error == 0)
		return;

	/*
	 * Probe to determine latency between NUMA nodes when SLIT
	 * doesn't exist or make sense
	 */
	lgrp_plat_probe_flags |= LGRP_PLAT_PROBE_ENABLE;

	/*
	 * Specify whether to probe using vendor ID register or page copy
	 * if hasn't been specified already or is overspecified
	 */
	probe_op = lgrp_plat_probe_flags &
	    (LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR);

	if (probe_op == 0 ||
	    probe_op == (LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR)) {
		lgrp_plat_probe_flags &=
		    ~(LGRP_PLAT_PROBE_PGCPY|LGRP_PLAT_PROBE_VENDOR);
		if (is_opteron())
			lgrp_plat_probe_flags |=
			    LGRP_PLAT_PROBE_VENDOR;
		else
			lgrp_plat_probe_flags |= LGRP_PLAT_PROBE_PGCPY;
	}

	/*
	 * Probing errors can mess up the lgroup topology and
	 * force us to fall back to a 2 level lgroup topology.
	 * Here we bound how tall the lgroup topology can grow
	 * in hopes of avoiding any anomalies in probing from
	 * messing up the lgroup topology by limiting the
	 * accuracy of the latency topology.
	 *
	 * Assume that nodes will at least be configured in a
	 * ring, so limit height of lgroup topology to be less
	 * than number of nodes on a system with 4 or more
	 * nodes.
	 */
	if (lgrp_plat_node_cnt >= 4 && lgrp_topo_ht_limit() ==
	    lgrp_topo_ht_limit_default())
		(void) lgrp_topo_ht_limit_set(lgrp_plat_node_cnt - 1);
}

/*
 * Latencies must be within 1/(2**LGRP_LAT_TOLERANCE_SHIFT) of each other to
 * be considered same
 */
#define	LGRP_LAT_TOLERANCE_SHIFT	4

int	lgrp_plat_probe_lt_shift = LGRP_LAT_TOLERANCE_SHIFT;

/*
 * Adjust latencies between nodes to be symmetric, normalize latencies between
 * any nodes that are within some tolerance to be same, and make local
 * latencies be same
 */
static void
lgrp_plat_latency_adjust(memnode_phys_addr_map_t *memnode_info,
    lgrp_plat_latency_stats_t *lat_stats, lgrp_plat_probe_stats_t *probe_stats)
{
	int				i;
	int				j;
	int				k;
	int				l;
	u_longlong_t			max;
	u_longlong_t			min;
	u_longlong_t			t;
	u_longlong_t			t1;
	u_longlong_t			t2;
	const lgrp_config_flag_t	cflag = LGRP_CONFIG_LAT_CHANGE_ALL;
	int	lat_corrected[MAX_NODES][MAX_NODES];

	/*
	 * Nothing to do when this is an UMA machine or don't have args needed
	 */
	if (max_mem_nodes == 1)
		return;

	ASSERT(memnode_info != NULL && lat_stats != NULL &&
	    probe_stats != NULL);

	/*
	 * Make sure that latencies are symmetric between any two nodes
	 * (ie. latency(node0, node1) == latency(node1, node0))
	 */
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		if (!memnode_info[i].exists)
			continue;

		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (!memnode_info[j].exists)
				continue;

			t1 = lat_stats->latencies[i][j];
			t2 = lat_stats->latencies[j][i];

			if (t1 == 0 || t2 == 0 || t1 == t2)
				continue;

			/*
			 * Latencies should be same
			 * - Use minimum of two latencies which should be same
			 * - Track suspect probe times not within tolerance of
			 *   min value
			 * - Remember how much values are corrected by
			 */
			if (t1 > t2) {
				t = t2;
				probe_stats->probe_errors[i][j] += t1 - t2;
				if (t1 - t2 > t2 >> lgrp_plat_probe_lt_shift) {
					probe_stats->probe_suspect[i][j]++;
					probe_stats->probe_suspect[j][i]++;
				}
			} else if (t2 > t1) {
				t = t1;
				probe_stats->probe_errors[j][i] += t2 - t1;
				if (t2 - t1 > t1 >> lgrp_plat_probe_lt_shift) {
					probe_stats->probe_suspect[i][j]++;
					probe_stats->probe_suspect[j][i]++;
				}
			}

			lat_stats->latencies[i][j] =
			    lat_stats->latencies[j][i] = t;
			lgrp_config(cflag, t1, t);
			lgrp_config(cflag, t2, t);
		}
	}

	/*
	 * Keep track of which latencies get corrected
	 */
	for (i = 0; i < MAX_NODES; i++)
		for (j = 0; j < MAX_NODES; j++)
			lat_corrected[i][j] = 0;

	/*
	 * For every two nodes, see whether there is another pair of nodes which
	 * are about the same distance apart and make the latencies be the same
	 * if they are close enough together
	 */
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (!memnode_info[j].exists)
				continue;
			/*
			 * Pick one pair of nodes (i, j)
			 * and get latency between them
			 */
			t1 = lat_stats->latencies[i][j];

			/*
			 * Skip this pair of nodes if there isn't a latency
			 * for it yet
			 */
			if (t1 == 0)
				continue;

			for (k = 0; k < lgrp_plat_node_cnt; k++) {
				for (l = 0; l < lgrp_plat_node_cnt; l++) {
					if (!memnode_info[l].exists)
						continue;
					/*
					 * Pick another pair of nodes (k, l)
					 * not same as (i, j) and get latency
					 * between them
					 */
					if (k == i && l == j)
						continue;

					t2 = lat_stats->latencies[k][l];

					/*
					 * Skip this pair of nodes if there
					 * isn't a latency for it yet
					 */
					if (t2 == 0)
						continue;

					/*
					 * Skip nodes (k, l) if they already
					 * have same latency as (i, j) or
					 * their latency isn't close enough to
					 * be considered/made the same
					 */
					if (t1 == t2 || (t1 > t2 && t1 - t2 >
					    t1 >> lgrp_plat_probe_lt_shift) ||
					    (t2 > t1 && t2 - t1 >
					    t2 >> lgrp_plat_probe_lt_shift))
						continue;

					/*
					 * Make latency(i, j) same as
					 * latency(k, l), try to use latency
					 * that has been adjusted already to get
					 * more consistency (if possible), and
					 * remember which latencies were
					 * adjusted for next time
					 */
					if (lat_corrected[i][j]) {
						t = t1;
						lgrp_config(cflag, t2, t);
						t2 = t;
					} else if (lat_corrected[k][l]) {
						t = t2;
						lgrp_config(cflag, t1, t);
						t1 = t;
					} else {
						if (t1 > t2)
							t = t2;
						else
							t = t1;
						lgrp_config(cflag, t1, t);
						lgrp_config(cflag, t2, t);
						t1 = t2 = t;
					}

					lat_stats->latencies[i][j] =
					    lat_stats->latencies[k][l] = t;

					lat_corrected[i][j] =
					    lat_corrected[k][l] = 1;
				}
			}
		}
	}

	/*
	 * Local latencies should be same
	 * - Find min and max local latencies
	 * - Make all local latencies be minimum
	 */
	min = -1;
	max = 0;
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		if (!memnode_info[i].exists)
			continue;
		t = lat_stats->latencies[i][i];
		if (t == 0)
			continue;
		if (min == -1 || t < min)
			min = t;
		if (t > max)
			max = t;
	}
	if (min != max) {
		for (i = 0; i < lgrp_plat_node_cnt; i++) {
			int	local;

			if (!memnode_info[i].exists)
				continue;

			local = lat_stats->latencies[i][i];
			if (local == 0)
				continue;

			/*
			 * Track suspect probe times that aren't within
			 * tolerance of minimum local latency and how much
			 * probe times are corrected by
			 */
			if (local - min > min >> lgrp_plat_probe_lt_shift)
				probe_stats->probe_suspect[i][i]++;

			probe_stats->probe_errors[i][i] += local - min;

			/*
			 * Make local latencies be minimum
			 */
			lgrp_config(LGRP_CONFIG_LAT_CHANGE, i, min);
			lat_stats->latencies[i][i] = min;
		}
	}

	/*
	 * Determine max probe time again since just adjusted latencies
	 */
	lat_stats->latency_max = 0;
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (!memnode_info[j].exists)
				continue;
			t = lat_stats->latencies[i][j];
			if (t > lat_stats->latency_max)
				lat_stats->latency_max = t;
		}
	}
}

/*
 * Verify following about latencies between nodes:
 *
 * - Latencies should be symmetric (ie. latency(a, b) == latency(b, a))
 * - Local latencies same
 * - Local < remote
 * - Number of latencies seen is reasonable
 * - Number of occurrences of a given latency should be more than 1
 *
 * Returns:
 *	0	Success
 *	-1	Not symmetric
 *	-2	Local latencies not same
 *	-3	Local >= remote
 */
static int
lgrp_plat_latency_verify(memnode_phys_addr_map_t *memnode_info,
    lgrp_plat_latency_stats_t *lat_stats)
{
	int		i;
	int		j;
	u_longlong_t	t1;
	u_longlong_t	t2;

	ASSERT(memnode_info != NULL && lat_stats != NULL);

	/*
	 * Nothing to do when this is an UMA machine, lgroup topology is
	 * limited to 2 levels, or there aren't any probe times yet
	 */
	if (max_mem_nodes == 1 || lgrp_topo_levels < 2 ||
	    lat_stats->latencies[0][0] == 0)
		return (0);

	/*
	 * Make sure that latencies are symmetric between any two nodes
	 * (ie. latency(node0, node1) == latency(node1, node0))
	 */
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		if (!memnode_info[i].exists)
			continue;
		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (!memnode_info[j].exists)
				continue;
			t1 = lat_stats->latencies[i][j];
			t2 = lat_stats->latencies[j][i];

			if (t1 == 0 || t2 == 0 || t1 == t2)
				continue;

			return (-1);
		}
	}

	/*
	 * Local latencies should be same
	 */
	t1 = lat_stats->latencies[0][0];
	for (i = 1; i < lgrp_plat_node_cnt; i++) {
		if (!memnode_info[i].exists)
			continue;

		t2 = lat_stats->latencies[i][i];
		if (t2 == 0)
			continue;

		if (t1 == 0) {
			t1 = t2;
			continue;
		}

		if (t1 != t2)
			return (-2);
	}

	/*
	 * Local latencies should be less than remote
	 */
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (!memnode_info[j].exists)
				continue;
			t2 = lat_stats->latencies[i][j];
			if (i == j || t2 == 0)
				continue;

			if (t1 >= t2)
				return (-3);
		}
	}

	return (0);
}

/*
 * Platform-specific initialization
 */
static void
lgrp_plat_main_init(void)
{
	int	curnode;
	int	ht_limit;
	int	i;

	/*
	 * Print a notice that MPO is disabled when memory is interleaved
	 * across nodes....Would do this when it is discovered, but can't
	 * because it happens way too early during boot....
	 */
	if (lgrp_plat_mem_intrlv)
		cmn_err(CE_NOTE,
		    "MPO disabled because memory is interleaved\n");

	/*
	 * Don't bother to do any probing if it is disabled, there is only one
	 * node, or the height of the lgroup topology less than or equal to 2
	 */
	ht_limit = lgrp_topo_ht_limit();
	if (!(lgrp_plat_probe_flags & LGRP_PLAT_PROBE_ENABLE) ||
	    max_mem_nodes == 1 || ht_limit <= 2) {
		/*
		 * Setup lgroup latencies for 2 level lgroup topology
		 * (ie. local and remote only) if they haven't been set yet
		 */
		if (ht_limit == 2 && lgrp_plat_lat_stats.latency_min == -1 &&
		    lgrp_plat_lat_stats.latency_max == 0)
			lgrp_plat_2level_setup(&lgrp_plat_lat_stats);
		return;
	}

	if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_VENDOR) {
		/*
		 * Should have been able to probe from CPU 0 when it was added
		 * to lgroup hierarchy, but may not have been able to then
		 * because it happens so early in boot that gethrtime() hasn't
		 * been initialized.  (:-(
		 */
		curnode = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
		    lgrp_plat_cpu_node_nentries);
		ASSERT(curnode >= 0 && curnode < lgrp_plat_node_cnt);
		if (lgrp_plat_lat_stats.latencies[curnode][curnode] == 0)
			lgrp_plat_probe();

		return;
	}

	/*
	 * When probing memory, use one page for every sample to determine
	 * lgroup topology and taking multiple samples
	 */
	if (lgrp_plat_probe_mem_config.probe_memsize == 0)
		lgrp_plat_probe_mem_config.probe_memsize = PAGESIZE *
		    lgrp_plat_probe_nsamples;

	/*
	 * Map memory in each node needed for probing to determine latency
	 * topology
	 */
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		int	mnode;

		/*
		 * Skip this node and leave its probe page NULL
		 * if it doesn't have any memory
		 */
		mnode = i;
		if (!mem_node_config[mnode].exists) {
			lgrp_plat_probe_mem_config.probe_va[i] = NULL;
			continue;
		}

		/*
		 * Allocate one kernel virtual page
		 */
		lgrp_plat_probe_mem_config.probe_va[i] = vmem_alloc(heap_arena,
		    lgrp_plat_probe_mem_config.probe_memsize, VM_NOSLEEP);
		if (lgrp_plat_probe_mem_config.probe_va[i] == NULL) {
			cmn_err(CE_WARN,
			    "lgrp_plat_main_init: couldn't allocate memory");
			return;
		}

		/*
		 * Get PFN for first page in each node
		 */
		lgrp_plat_probe_mem_config.probe_pfn[i] =
		    mem_node_config[mnode].physbase;

		/*
		 * Map virtual page to first page in node
		 */
		hat_devload(kas.a_hat, lgrp_plat_probe_mem_config.probe_va[i],
		    lgrp_plat_probe_mem_config.probe_memsize,
		    lgrp_plat_probe_mem_config.probe_pfn[i],
		    PROT_READ | PROT_WRITE | HAT_PLAT_NOCACHE,
		    HAT_LOAD_NOCONSIST);
	}

	/*
	 * Probe from current CPU
	 */
	lgrp_plat_probe();
}

/*
 * Return the number of free, allocatable, or installed
 * pages in an lgroup
 * This is a copy of the MAX_MEM_NODES == 1 version of the routine
 * used when MPO is disabled (i.e. single lgroup) or this is the root lgroup
 */
static pgcnt_t
lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
{
	_NOTE(ARGUNUSED(lgrphand));

	struct memlist *mlist;
	pgcnt_t npgs = 0;
	extern struct memlist *phys_avail;
	extern struct memlist *phys_install;

	switch (query) {
	case LGRP_MEM_SIZE_FREE:
		return ((pgcnt_t)freemem);
	case LGRP_MEM_SIZE_AVAIL:
		memlist_read_lock();
		for (mlist = phys_avail; mlist; mlist = mlist->ml_next)
			npgs += btop(mlist->ml_size);
		memlist_read_unlock();
		return (npgs);
	case LGRP_MEM_SIZE_INSTALL:
		memlist_read_lock();
		for (mlist = phys_install; mlist; mlist = mlist->ml_next)
			npgs += btop(mlist->ml_size);
		memlist_read_unlock();
		return (npgs);
	default:
		return ((pgcnt_t)0);
	}
}
/*
 * Update node to proximity domain mappings for given domain and return node ID
 */
static int
lgrp_plat_node_domain_update(node_domain_map_t *node_domain, int node_cnt,
    uint32_t domain)
{
	uint_t	node;
	uint_t	start;

	/*
	 * Hash proximity domain ID into node to domain mapping table (array)
	 * and add entry for it into first non-existent or matching entry found
	 */
	node = start = NODE_DOMAIN_HASH(domain, node_cnt);
	do {
		/*
		 * Entry doesn't exist yet, so create one for this proximity
		 * domain and return node ID which is index into mapping table.
		 */
		if (!node_domain[node].exists) {
			node_domain[node].prox_domain = domain;
			membar_producer();
			node_domain[node].exists = 1;
			return (node);
		}

		/*
		 * Entry exists for this proximity domain already, so just
		 * return node ID (index into table).
		 */
		if (node_domain[node].prox_domain == domain)
			return (node);
		node = NODE_DOMAIN_HASH(node + 1, node_cnt);
	} while (node != start);

	/*
	 * Ran out of supported number of entries which shouldn't happen....
	 */
	ASSERT(node != start);
	return (-1);
}
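/*
 * Illustrative sketch (not part of the original file, not compiled): the
 * open addressing scheme used above, reduced to a standalone program.  The
 * table size, the domain values, and every "example" name are made up for
 * illustration; EXAMPLE_HASH() stands in for NODE_DOMAIN_HASH() by reducing
 * the domain ID modulo the table size.
 */
#if 0
#include <stdio.h>

#define	EXAMPLE_NODES		4
#define	EXAMPLE_HASH(domain)	((domain) % EXAMPLE_NODES)

struct example_entry {
	int		exists;
	unsigned int	prox_domain;
};

static int
example_domain_to_node(struct example_entry *tbl, unsigned int domain)
{
	unsigned int node, start;

	node = start = EXAMPLE_HASH(domain);
	do {
		/* Empty slot: claim it for this domain */
		if (!tbl[node].exists) {
			tbl[node].prox_domain = domain;
			tbl[node].exists = 1;
			return (node);
		}
		/* Domain already mapped: return its node ID */
		if (tbl[node].prox_domain == domain)
			return (node);
		/* Collision: linear probe to the next slot */
		node = EXAMPLE_HASH(node + 1);
	} while (node != start);

	return (-1);	/* table full */
}

int
main(void)
{
	struct example_entry tbl[EXAMPLE_NODES] = { { 0, 0 } };
	unsigned int domains[] = { 0, 4, 1 };	/* 0 and 4 collide */
	int i;

	/* Prints nodes 0, 1, and 2 respectively */
	for (i = 0; i < 3; i++)
		printf("domain %u -> node %d\n", domains[i],
		    example_domain_to_node(tbl, domains[i]));
	return (0);
}
#endif	/* illustrative sketch */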
/*
 * Update node memory information for given proximity domain with specified
 * starting and ending physical address range (and return positive numbers for
 * success and negative ones for errors)
 */
static int
lgrp_plat_memnode_info_update(node_domain_map_t *node_domain, int node_cnt,
    memnode_phys_addr_map_t *memnode_info, int memnode_cnt, uint64_t start,
    uint64_t end, uint32_t domain, uint32_t device_id)
{
	int	node, mnode;

	/*
	 * Get node number for proximity domain
	 */
	node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
	if (node == -1) {
		node = lgrp_plat_node_domain_update(node_domain, node_cnt,
		    domain);
		if (node == -1)
			return (-1);
	}

	/*
	 * This function is called during boot if device_id is
	 * ACPI_MEMNODE_DEVID_BOOT, otherwise it's called at runtime for
	 * memory DR operations.
	 */
	if (device_id != ACPI_MEMNODE_DEVID_BOOT) {
		ASSERT(lgrp_plat_max_mem_node <= memnode_cnt);

		for (mnode = lgrp_plat_node_cnt;
		    mnode < lgrp_plat_max_mem_node; mnode++) {
			if (memnode_info[mnode].exists &&
			    memnode_info[mnode].prox_domain == domain &&
			    memnode_info[mnode].device_id == device_id) {
				if (btop(start) < memnode_info[mnode].start)
					memnode_info[mnode].start =
					    btop(start);
				if (btop(end) > memnode_info[mnode].end)
					memnode_info[mnode].end = btop(end);
				return (1);
			}
		}

		if (lgrp_plat_max_mem_node >= memnode_cnt) {
			return (-3);
		}

		lgrp_plat_max_mem_node++;
		memnode_info[mnode].start = btop(start);
		memnode_info[mnode].end = btop(end);
		memnode_info[mnode].prox_domain = domain;
		memnode_info[mnode].device_id = device_id;
		memnode_info[mnode].lgrphand = node;
		membar_producer();
		memnode_info[mnode].exists = 1;
		return (0);
	}

	/*
	 * Create entry in table for node if it doesn't exist
	 */
	ASSERT(node < memnode_cnt);
	if (!memnode_info[node].exists) {
		memnode_info[node].start = btop(start);
		memnode_info[node].end = btop(end);
		memnode_info[node].prox_domain = domain;
		memnode_info[node].device_id = device_id;
		memnode_info[node].lgrphand = node;
		membar_producer();
		memnode_info[node].exists = 1;
		return (0);
	}

	/*
	 * Entry already exists for this proximity domain
	 *
	 * There may be more than one SRAT memory entry for a domain, so we may
	 * need to update existing start or end address for the node.
	 */
	if (memnode_info[node].prox_domain == domain) {
		if (btop(start) < memnode_info[node].start)
			memnode_info[node].start = btop(start);
		if (btop(end) > memnode_info[node].end)
			memnode_info[node].end = btop(end);
		return (1);
	}
	return (-2);
}
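/*
 * Worked example (illustrative, values made up): with 4K pages, two SRAT
 * memory entries for the same proximity domain covering physical addresses
 * 0x0-0x7FFFFFFF and 0x100000000-0x17FFFFFFF leave the node spanning PFNs
 * btop(0x0) = 0x0 through btop(0x17FFFFFFF) = 0x17FFFF, since the code
 * above only ever widens the existing [start, end] PFN range.
 */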
/*
 * Have to sort nodes by starting physical address because plat_mnode_xcheck()
 * assumes and expects memnodes to be sorted in ascending order by physical
 * address
 */
static void
lgrp_plat_node_sort(node_domain_map_t *node_domain, int node_cnt,
    cpu_node_map_t *cpu_node, int cpu_count,
    memnode_phys_addr_map_t *memnode_info)
{
	boolean_t	found;
	int		i;
	int		j;
	int		n;
	boolean_t	sorted;
	boolean_t	swapped;

	if (!lgrp_plat_node_sort_enable || node_cnt <= 1 ||
	    node_domain == NULL || memnode_info == NULL)
		return;

	/*
	 * Sorted already?
	 */
	sorted = B_TRUE;
	for (i = 0; i < node_cnt - 1; i++) {
		/*
		 * Skip entries that don't exist
		 */
		if (!memnode_info[i].exists)
			continue;

		/*
		 * Try to find next existing entry to compare against
		 */
		found = B_FALSE;
		for (j = i + 1; j < node_cnt; j++) {
			if (memnode_info[j].exists) {
				found = B_TRUE;
				break;
			}
		}

		/*
		 * Done if no more existing entries to compare against
		 */
		if (found == B_FALSE)
			break;

		/*
		 * Not sorted if starting address of current entry is bigger
		 * than starting address of next existing entry
		 */
		if (memnode_info[i].start > memnode_info[j].start) {
			sorted = B_FALSE;
			break;
		}
	}

	/*
	 * Don't need to sort if sorted already
	 */
	if (sorted == B_TRUE)
		return;

	/*
	 * Just use bubble sort since number of nodes is small
	 */
	n = node_cnt;
	do {
		swapped = B_FALSE;
		n--;
		for (i = 0; i < n; i++) {
			/*
			 * Skip entries that don't exist
			 */
			if (!memnode_info[i].exists)
				continue;

			/*
			 * Try to find next existing entry to compare against
			 */
			found = B_FALSE;
			for (j = i + 1; j <= n; j++) {
				if (memnode_info[j].exists) {
					found = B_TRUE;
					break;
				}
			}

			/*
			 * Done if no more existing entries to compare against
			 */
			if (found == B_FALSE)
				break;

			if (memnode_info[i].start > memnode_info[j].start) {
				memnode_phys_addr_map_t	save_addr;
				node_domain_map_t	save_node;

				/*
				 * Swap node to proximity domain ID assignments
				 */
				bcopy(&node_domain[i], &save_node,
				    sizeof (node_domain_map_t));
				bcopy(&node_domain[j], &node_domain[i],
				    sizeof (node_domain_map_t));
				bcopy(&save_node, &node_domain[j],
				    sizeof (node_domain_map_t));

				/*
				 * Swap node to physical memory assignments
				 */
				bcopy(&memnode_info[i], &save_addr,
				    sizeof (memnode_phys_addr_map_t));
				bcopy(&memnode_info[j], &memnode_info[i],
				    sizeof (memnode_phys_addr_map_t));
				bcopy(&save_addr, &memnode_info[j],
				    sizeof (memnode_phys_addr_map_t));
				swapped = B_TRUE;
			}
		}
	} while (swapped == B_TRUE);

	/*
	 * Check to make sure that CPUs assigned to correct node IDs now since
	 * node to proximity domain ID assignments may have been changed above
	 */
	if (n == node_cnt - 1 || cpu_node == NULL || cpu_count < 1)
		return;
	for (i = 0; i < cpu_count; i++) {
		int	node;

		node = lgrp_plat_domain_to_node(node_domain, node_cnt,
		    cpu_node[i].prox_domain);
		if (cpu_node[i].node != node)
			cpu_node[i].node = node;
	}
}
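/*
 * Illustrative trace (hypothetical values): given three existing memnodes
 * with starting PFNs { 0x100000, 0x0, 0x80000 }, the bubble sort above
 * swaps entries until the order is { 0x0, 0x80000, 0x100000 }, carrying
 * the corresponding node to proximity domain entries along so that node
 * IDs still line up with their domains afterward.
 */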
/*
 * Return time needed to probe from current CPU to memory in given node
 */
static hrtime_t
lgrp_plat_probe_time(int to, cpu_node_map_t *cpu_node, int cpu_node_nentries,
    lgrp_plat_probe_mem_config_t *probe_mem_config,
    lgrp_plat_latency_stats_t *lat_stats, lgrp_plat_probe_stats_t *probe_stats)
{
	caddr_t		buf;
	hrtime_t	elapsed;
	hrtime_t	end;
	int		from;
	int		i;
	int		ipl;
	hrtime_t	max;
	hrtime_t	min;
	hrtime_t	start;
	extern int	use_sse_pagecopy;

	/*
	 * Determine ID of node containing current CPU
	 */
	from = lgrp_plat_cpu_to_node(CPU, cpu_node, cpu_node_nentries);
	ASSERT(from >= 0 && from < lgrp_plat_node_cnt);

	/*
	 * Do common work for probing main memory
	 */
	if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_PGCPY) {
		/*
		 * Skip probing any nodes without memory and
		 * set probe time to 0
		 */
		if (probe_mem_config->probe_va[to] == NULL) {
			lat_stats->latencies[from][to] = 0;
			return (0);
		}

		/*
		 * Invalidate caches once instead of once every sample
		 * which should cut cost of probing by a lot
		 */
		probe_stats->flush_cost = gethrtime();
		invalidate_cache();
		probe_stats->flush_cost = gethrtime() -
		    probe_stats->flush_cost;
		probe_stats->probe_cost_total += probe_stats->flush_cost;
	}

	/*
	 * Probe from current CPU to given memory using specified operation
	 * and take specified number of samples
	 */
	max = 0;
	min = -1;
	for (i = 0; i < lgrp_plat_probe_nsamples; i++) {
		probe_stats->probe_cost = gethrtime();

		/*
		 * Can't measure probe time if gethrtime() isn't working yet
		 */
		if (probe_stats->probe_cost == 0 && gethrtime() == 0)
			return (0);

		if (lgrp_plat_probe_flags & LGRP_PLAT_PROBE_VENDOR) {
			/*
			 * Measure how long it takes to read vendor ID from
			 * Northbridge
			 */
			elapsed = opt_probe_vendor(to, lgrp_plat_probe_nreads);
		} else {
			/*
			 * Measure how long it takes to copy page
			 * from memory in given node
			 */
			buf = probe_mem_config->probe_va[to] + (i * PAGESIZE);

			kpreempt_disable();
			ipl = splhigh();
			start = gethrtime();
			if (use_sse_pagecopy)
				hwblkpagecopy(buf, buf);
			else
				bcopy(buf, buf, PAGESIZE);
			end = gethrtime();
			elapsed = end - start;
			splx(ipl);
			kpreempt_enable();
		}

		probe_stats->probe_cost = gethrtime() -
		    probe_stats->probe_cost;
		probe_stats->probe_cost_total += probe_stats->probe_cost;

		if (min == -1 || elapsed < min)
			min = elapsed;
		if (elapsed > max)
			max = elapsed;
	}

	/*
	 * Update minimum and maximum probe times between
	 * these two nodes
	 */
	if (min < probe_stats->probe_min[from][to] ||
	    probe_stats->probe_min[from][to] == 0)
		probe_stats->probe_min[from][to] = min;

	if (max > probe_stats->probe_max[from][to])
		probe_stats->probe_max[from][to] = max;

	return (min);
}
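/*
 * Minimal userland sketch (illustrative, not compiled) of the sampling
 * pattern used above: time an operation several times and keep the
 * minimum.  clock_gettime() stands in for gethrtime(), memmove() for the
 * page copy, and all names here are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <time.h>

#define	EXAMPLE_NSAMPLES	8

static int64_t
example_now_ns(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}

static int64_t
example_probe_min(char *buf, size_t len)
{
	int64_t elapsed, min, start;
	int i;

	min = -1;
	for (i = 0; i < EXAMPLE_NSAMPLES; i++) {
		start = example_now_ns();
		(void) memmove(buf, buf, len);	/* stand-in for page copy */
		elapsed = example_now_ns() - start;
		if (min == -1 || elapsed < min)
			min = elapsed;
	}
	return (min);
}
#endif	/* illustrative sketch */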
/*
 * Read boot property with CPU to APIC ID array, fill in CPU to node ID
 * mapping table with APIC ID for each CPU (if pointer to table isn't NULL),
 * and return number of CPU APIC IDs.
 *
 * NOTE: This code assumes that CPU IDs are assigned in order that they appear
 *	 in cpu_apicid_array boot property which is based on and follows
 *	 same ordering as processor list in ACPI MADT.  If the code in
 *	 usr/src/uts/i86pc/io/pcplusmp/apic.c that reads MADT and assigns
 *	 CPU IDs ever changes, then this code will need to change too....
 */
static int
lgrp_plat_process_cpu_apicids(cpu_node_map_t *cpu_node)
{
	int		boot_prop_len;
	char		*boot_prop_name = BP_CPU_APICID_ARRAY;
	uint32_t	*cpu_apicid_array;
	int		i;
	int		n;

	/*
	 * Check length of property value
	 */
	boot_prop_len = BOP_GETPROPLEN(bootops, boot_prop_name);
	if (boot_prop_len <= 0)
		return (-1);

	/*
	 * Calculate number of entries in array and return when the system is
	 * not very interesting for NUMA.  It's not interesting for NUMA if
	 * system has only one CPU and doesn't support CPU hotplug.
	 */
	n = boot_prop_len / sizeof (*cpu_apicid_array);
	if (n == 1 && !plat_dr_support_cpu())
		return (-2);

	cpu_apicid_array = (uint32_t *)BOP_ALLOC(bootops, NULL, boot_prop_len,
	    sizeof (*cpu_apicid_array));

	/*
	 * Get CPU to APIC ID property value
	 */
	if (cpu_apicid_array == NULL ||
	    BOP_GETPROP(bootops, boot_prop_name, cpu_apicid_array) < 0)
		return (-3);

	/*
	 * Just return number of CPU APIC IDs if CPU to node mapping table is
	 * not given
	 */
	if (cpu_node == NULL) {
		if (plat_dr_support_cpu() && n >= boot_ncpus) {
			return (boot_ncpus);
		} else {
			return (n);
		}
	}

	/*
	 * Fill in CPU to node ID mapping table with APIC ID for each CPU
	 */
	for (i = 0; i < n; i++) {
		/* Only add boot CPUs into the map if CPU DR is enabled. */
		if (plat_dr_support_cpu() && i >= boot_ncpus)
			break;
		cpu_node[i].exists = 1;
		cpu_node[i].apicid = cpu_apicid_array[i];
		cpu_node[i].prox_domain = UINT32_MAX;
		cpu_node[i].node = UINT_MAX;
	}

	/*
	 * Return number of CPUs based on number of APIC IDs
	 */
	return (i);
}
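/*
 * Worked example (illustrative): each array element is a uint32_t, so a
 * 32 byte BP_CPU_APICID_ARRAY property value yields n = 32 / 4 = 8 CPU
 * APIC IDs.
 */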
/*
 * Read ACPI System Locality Information Table (SLIT) to determine how far each
 * NUMA node is from each other
 */
static int
lgrp_plat_process_slit(ACPI_TABLE_SLIT *tp,
    node_domain_map_t *node_domain, uint_t node_cnt,
    memnode_phys_addr_map_t *memnode_info, lgrp_plat_latency_stats_t *lat_stats)
{
	int		i;
	int		j;
	int		src;
	int		dst;
	int		localities;
	hrtime_t	latency;
	hrtime_t	max;
	hrtime_t	min;
	int		retval;
	uint8_t		*slit_entries;

	if (tp == NULL || !lgrp_plat_slit_enable)
		return (1);

	if (lat_stats == NULL)
		return (2);

	localities = tp->LocalityCount;

	min = lat_stats->latency_min;
	max = lat_stats->latency_max;

	/*
	 * Fill in latency matrix based on SLIT entries
	 */
	slit_entries = tp->Entry;
	for (i = 0; i < localities; i++) {
		src = lgrp_plat_domain_to_node(node_domain,
		    node_cnt, i);
		if (src == -1)
			continue;

		for (j = 0; j < localities; j++) {
			dst = lgrp_plat_domain_to_node(node_domain,
			    node_cnt, j);
			if (dst == -1)
				continue;

			latency = slit_entries[(i * localities) + j];
			lat_stats->latencies[src][dst] = latency;
			if (latency < min || min == -1)
				min = latency;
			if (latency > max)
				max = latency;
		}
	}

	/*
	 * Verify that latencies/distances given in SLIT look reasonable
	 */
	retval = lgrp_plat_latency_verify(memnode_info, lat_stats);

	if (retval) {
		/*
		 * Reinitialize (zero) latency table since SLIT doesn't look
		 * right
		 */
		for (i = 0; i < localities; i++) {
			for (j = 0; j < localities; j++)
				lat_stats->latencies[i][j] = 0;
		}
	} else {
		/*
		 * Update min and max latencies seen since SLIT looks valid
		 */
		lat_stats->latency_min = min;
		lat_stats->latency_max = max;
	}

	return (retval);
}
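/*
 * Worked example (illustrative): for a two locality SLIT with entries
 * { 10, 21, 21, 10 }, the expression (i * localities) + j above reads the
 * matrix in row major order, giving latencies[0][0] = 10,
 * latencies[0][1] = 21, latencies[1][0] = 21, and latencies[1][1] = 10,
 * i.e. the ACPI defined self distance of 10 and a remote distance of 21.
 */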
/*
 * Update lgrp latencies according to information returned by ACPI _SLI method.
 */
static int
lgrp_plat_process_sli(uint32_t domain_id, uchar_t *sli_info,
    uint32_t sli_cnt, node_domain_map_t *node_domain, uint_t node_cnt,
    lgrp_plat_latency_stats_t *lat_stats)
{
	int		i;
	int		src, dst;
	uint8_t		latency;
	hrtime_t	max, min;

	if (lat_stats == NULL || sli_info == NULL ||
	    sli_cnt == 0 || domain_id >= sli_cnt)
		return (-1);

	src = lgrp_plat_domain_to_node(node_domain, node_cnt, domain_id);
	if (src == -1) {
		src = lgrp_plat_node_domain_update(node_domain, node_cnt,
		    domain_id);
		if (src == -1)
			return (-1);
	}

	/*
	 * Don't update latency info if topology has been flattened to 2 levels.
	 */
	if (lgrp_plat_topo_flatten != 0) {
		return (0);
	}

	/*
	 * Latency information for proximity domain is ready.
	 * TODO: support adjusting latency information at runtime.
	 */
	if (lat_stats->latencies[src][src] != 0) {
		return (0);
	}

	/* Validate latency information. */
	for (i = 0; i < sli_cnt; i++) {
		if (i == domain_id) {
			if (sli_info[i] != ACPI_SLIT_SELF_LATENCY ||
			    sli_info[sli_cnt + i] != ACPI_SLIT_SELF_LATENCY) {
				return (-1);
			}
		} else {
			if (sli_info[i] <= ACPI_SLIT_SELF_LATENCY ||
			    sli_info[sli_cnt + i] <= ACPI_SLIT_SELF_LATENCY ||
			    sli_info[i] != sli_info[sli_cnt + i]) {
				return (-1);
			}
		}
	}

	min = lat_stats->latency_min;
	max = lat_stats->latency_max;
	for (i = 0; i < sli_cnt; i++) {
		dst = lgrp_plat_domain_to_node(node_domain, node_cnt, i);
		if (dst == -1)
			continue;

		ASSERT(sli_info[i] == sli_info[sli_cnt + i]);

		/* Update row in latencies matrix. */
		latency = sli_info[i];
		lat_stats->latencies[src][dst] = latency;
		if (latency < min || min == -1)
			min = latency;
		if (latency > max)
			max = latency;

		/* Update column in latencies matrix. */
		latency = sli_info[sli_cnt + i];
		lat_stats->latencies[dst][src] = latency;
		if (latency < min || min == -1)
			min = latency;
		if (latency > max)
			max = latency;
	}
	lat_stats->latency_min = min;
	lat_stats->latency_max = max;

	return (0);
}
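/*
 * Illustrative layout (hypothetical values): for sli_cnt = 2 and
 * domain_id = 1, sli_info[] might hold { 21, 10, 21, 10 }.  The first
 * sli_cnt bytes are the row for domain 1 (latencies from domain 1 to
 * domains 0 and 1) and the next sli_cnt bytes are the column (latencies
 * from domains 0 and 1 to domain 1).  Both self entries must equal
 * ACPI_SLIT_SELF_LATENCY (10) to pass the validation above.
 */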
/*
 * Read ACPI System Resource Affinity Table (SRAT) to determine which CPUs
 * and memory are local to each other in the same NUMA node and return number
 * of nodes
 */
static int
lgrp_plat_process_srat(ACPI_TABLE_SRAT *tp, ACPI_TABLE_MSCT *mp,
    uint32_t *prox_domain_min, node_domain_map_t *node_domain,
    cpu_node_map_t *cpu_node, int cpu_count,
    memnode_phys_addr_map_t *memnode_info)
{
	ACPI_SUBTABLE_HEADER	*item, *srat_end;
	int			i;
	int			node_cnt;
	int			proc_entry_count;
	int			rc;

	/*
	 * Nothing to do when no SRAT or disabled
	 */
	if (tp == NULL || !lgrp_plat_srat_enable)
		return (-1);

	/*
	 * Try to get domain information from MSCT table.
	 * ACPI4.0: OSPM will use information provided by the MSCT only
	 * when the System Resource Affinity Table (SRAT) exists.
	 */
	node_cnt = lgrp_plat_msct_domains(mp, prox_domain_min);
	if (node_cnt <= 0) {
		/*
		 * Determine number of nodes by counting number of proximity
		 * domains in SRAT.
		 */
		node_cnt = lgrp_plat_srat_domains(tp, prox_domain_min);
	}

	/*
	 * Return if number of nodes is 1 or less since don't need to read SRAT.
	 */
	if (node_cnt == 1)
		return (1);
	else if (node_cnt <= 0)
		return (-2);

	/*
	 * Walk through SRAT, examining each CPU and memory entry to determine
	 * which CPUs and memory belong to which node.
	 */
	item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
	srat_end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
	proc_entry_count = 0;
	while (item < srat_end) {
		uint32_t	apic_id;
		uint32_t	domain;
		uint64_t	end;
		uint64_t	length;
		uint64_t	start;

		switch (item->Type) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY: {	/* CPU entry */
			ACPI_SRAT_CPU_AFFINITY *cpu =
			    (ACPI_SRAT_CPU_AFFINITY *) item;

			if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED) ||
			    cpu_node == NULL)
				break;

			/*
			 * Calculate domain (node) ID and fill in APIC ID to
			 * domain/node mapping table
			 */
			domain = cpu->ProximityDomainLo;
			for (i = 0; i < 3; i++) {
				domain += cpu->ProximityDomainHi[i] <<
				    ((i + 1) * 8);
			}
			apic_id = cpu->ApicId;

			rc = lgrp_plat_cpu_node_update(node_domain, node_cnt,
			    cpu_node, cpu_count, apic_id, domain);
			if (rc < 0)
				return (-3);
			else if (rc == 0)
				proc_entry_count++;
			break;
		}
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY: {	/* memory entry */
			ACPI_SRAT_MEM_AFFINITY *mem =
			    (ACPI_SRAT_MEM_AFFINITY *)item;

			if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED) ||
			    memnode_info == NULL)
				break;

			/*
			 * Get domain (node) ID and fill in domain/node
			 * to memory mapping table
			 */
			domain = mem->ProximityDomain;
			start = mem->BaseAddress;
			length = mem->Length;
			end = start + length - 1;

			/*
			 * According to ACPI 4.0, both ENABLE and HOTPLUG flags
			 * may be set for memory address range entries in SRAT
			 * table which are reserved for memory hot plug.
			 * We intersect memory address ranges in SRAT table
			 * with memory ranges in physinstalled to filter out
			 * memory address ranges reserved for hot plug.
			 */
			if (mem->Flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) {
				uint64_t	rstart = UINT64_MAX;
				uint64_t	rend = 0;
				struct memlist	*ml;
				extern struct bootops	*bootops;

				memlist_read_lock();
				for (ml = bootops->boot_mem->physinstalled;
				    ml; ml = ml->ml_next) {
					uint64_t tstart = ml->ml_address;
					uint64_t tend;

					tend = ml->ml_address + ml->ml_size;
					if (tstart > end || tend < start)
						continue;
					if (rstart > tstart)
						rstart = tstart;
					if (rend < tend)
						rend = tend;
				}
				memlist_read_unlock();
				start = MAX(rstart, start);
				end = MIN(rend - 1, end);
				/* Skip this entry if no memory installed. */
				if (start > end)
					break;
			}

			if (lgrp_plat_memnode_info_update(node_domain,
			    node_cnt, memnode_info, node_cnt,
			    start, end, domain, ACPI_MEMNODE_DEVID_BOOT) < 0)
				return (-4);
			break;
		}
		case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: {	/* x2apic CPU */
			ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
			    (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;

			if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED) ||
			    cpu_node == NULL)
				break;

			/*
			 * Calculate domain (node) ID and fill in APIC ID to
			 * domain/node mapping table
			 */
			domain = x2cpu->ProximityDomain;
			apic_id = x2cpu->ApicId;

			rc = lgrp_plat_cpu_node_update(node_domain, node_cnt,
			    cpu_node, cpu_count, apic_id, domain);
			if (rc < 0)
				return (-3);
			else if (rc == 0)
				proc_entry_count++;
			break;
		}
		default:
			break;
		}

		item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
	}

	/*
	 * Should have seen at least as many SRAT processor entries as CPUs
	 */
	if (proc_entry_count < cpu_count)
		return (-5);

	/*
	 * Need to sort nodes by starting physical address since VM system
	 * assumes and expects memnodes to be sorted in ascending order by
	 * physical address
	 */
	lgrp_plat_node_sort(node_domain, node_cnt, cpu_node, cpu_count,
	    memnode_info);

	return (node_cnt);
}
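/*
 * Minimal sketch (illustrative, not compiled) of the variable length
 * subtable walk used above.  The struct and function names here are
 * hypothetical; subtable_header_example stands in for
 * ACPI_SUBTABLE_HEADER, which leads every SRAT entry with its Type and
 * Length.
 */
#if 0
#include <stdint.h>

struct subtable_header_example {
	uint8_t	type;
	uint8_t	length;
};

/*
 * Visit each variable length entry between the end of the fixed table
 * header and the end of the table, stepping by each entry's own length.
 */
static void
example_walk_subtables(void *table, uintptr_t header_len, uintptr_t table_len,
    void (*visit)(struct subtable_header_example *))
{
	struct subtable_header_example *item, *end;

	item = (struct subtable_header_example *)((uintptr_t)table +
	    header_len);
	end = (struct subtable_header_example *)((uintptr_t)table + table_len);
	while (item < end) {
		visit(item);
		item = (struct subtable_header_example *)((uintptr_t)item +
		    item->length);
	}
}
#endif	/* illustrative sketch */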
/*
 * Allocate permanent memory for any temporary memory that we needed to
 * allocate using BOP_ALLOC() before kmem_alloc() and VM system were
 * initialized and copy everything from temporary to permanent memory since
 * temporary boot memory will eventually be released during boot
 */
static void
lgrp_plat_release_bootstrap(void)
{
	void	*buf;
	size_t	size;

	if (lgrp_plat_cpu_node_nentries > 0) {
		size = lgrp_plat_cpu_node_nentries * sizeof (cpu_node_map_t);
		buf = kmem_alloc(size, KM_SLEEP);
		bcopy(lgrp_plat_cpu_node, buf, size);
		lgrp_plat_cpu_node = buf;
	}
}
/*
 * Return number of proximity domains given in ACPI SRAT
 */
static int
lgrp_plat_srat_domains(ACPI_TABLE_SRAT *tp, uint32_t *prox_domain_min)
{
	int			domain_cnt;
	uint32_t		domain_min;
	ACPI_SUBTABLE_HEADER	*item, *end;
	int			i;
	node_domain_map_t	node_domain[MAX_NODES];

	if (tp == NULL || !lgrp_plat_srat_enable)
		return (1);

	/*
	 * Walk through SRAT to find minimum proximity domain ID
	 */
	domain_min = UINT32_MAX;
	item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
	end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
	while (item < end) {
		uint32_t	domain;

		switch (item->Type) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY: {	/* CPU entry */
			ACPI_SRAT_CPU_AFFINITY *cpu =
			    (ACPI_SRAT_CPU_AFFINITY *) item;

			if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = cpu->ProximityDomainLo;
			for (i = 0; i < 3; i++) {
				domain += cpu->ProximityDomainHi[i] <<
				    ((i + 1) * 8);
			}
			break;
		}
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY: {	/* memory entry */
			ACPI_SRAT_MEM_AFFINITY *mem =
			    (ACPI_SRAT_MEM_AFFINITY *)item;

			if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = mem->ProximityDomain;
			break;
		}
		case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: {	/* x2apic CPU */
			ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
			    (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;

			if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = x2cpu->ProximityDomain;
			break;
		}
		default:
			item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item +
			    item->Length);
			continue;
		}

		/*
		 * Keep track of minimum proximity domain ID
		 */
		if (domain < domain_min)
			domain_min = domain;

		item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
	}
	if (lgrp_plat_domain_min_enable && prox_domain_min != NULL)
		*prox_domain_min = domain_min;

	/*
	 * Walk through SRAT, examining each CPU and memory entry to determine
	 * proximity domain ID for each.
	 */
	domain_cnt = 0;
	item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)tp + sizeof (*tp));
	end = (ACPI_SUBTABLE_HEADER *)(tp->Header.Length + (uintptr_t)tp);
	bzero(node_domain, MAX_NODES * sizeof (node_domain_map_t));
	while (item < end) {
		uint32_t	domain;
		boolean_t	overflow;
		uint_t		start;

		switch (item->Type) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY: {	/* CPU entry */
			ACPI_SRAT_CPU_AFFINITY *cpu =
			    (ACPI_SRAT_CPU_AFFINITY *) item;

			if (!(cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = cpu->ProximityDomainLo;
			for (i = 0; i < 3; i++) {
				domain += cpu->ProximityDomainHi[i] <<
				    ((i + 1) * 8);
			}
			break;
		}
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY: {	/* memory entry */
			ACPI_SRAT_MEM_AFFINITY *mem =
			    (ACPI_SRAT_MEM_AFFINITY *)item;

			if (!(mem->Flags & ACPI_SRAT_MEM_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = mem->ProximityDomain;
			break;
		}
		case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY: {	/* x2apic CPU */
			ACPI_SRAT_X2APIC_CPU_AFFINITY *x2cpu =
			    (ACPI_SRAT_X2APIC_CPU_AFFINITY *) item;

			if (!(x2cpu->Flags & ACPI_SRAT_CPU_ENABLED)) {
				item = (ACPI_SUBTABLE_HEADER *)
				    ((uintptr_t)item + item->Length);
				continue;
			}
			domain = x2cpu->ProximityDomain;
			break;
		}
		default:
			item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item +
			    item->Length);
			continue;
		}

		/*
		 * Count and keep track of which proximity domain IDs seen
		 */
		start = i = domain % MAX_NODES;
		overflow = B_TRUE;
		do {
			/*
			 * Create entry for proximity domain and increment
			 * count when no entry exists where proximity domain
			 * hashed
			 */
			if (!node_domain[i].exists) {
				node_domain[i].exists = 1;
				node_domain[i].prox_domain = domain;
				domain_cnt++;
				overflow = B_FALSE;
				break;
			}

			/*
			 * Nothing to do when proximity domain seen already
			 * and its entry exists
			 */
			if (node_domain[i].prox_domain == domain) {
				overflow = B_FALSE;
				break;
			}

			/*
			 * Entry exists where proximity domain hashed, but for
			 * different proximity domain so keep search for empty
			 * slot to put it or matching entry whichever comes
			 * first.
			 */
			i = (i + 1) % MAX_NODES;
		} while (i != start);

		/*
		 * Didn't find empty or matching entry which means have more
		 * proximity domains than supported nodes (:-(
		 */
		ASSERT(overflow != B_TRUE);
		if (overflow == B_TRUE)
			return (-1);

		item = (ACPI_SUBTABLE_HEADER *)((uintptr_t)item + item->Length);
	}

	return (domain_cnt);
}
/*
 * Parse domain information in ACPI Maximum System Capability Table (MSCT).
 * MSCT table has been verified in function process_msct() in fakebop.c.
 */
static int
lgrp_plat_msct_domains(ACPI_TABLE_MSCT *tp, uint32_t *prox_domain_min)
{
	int			last_seen = 0;
	uint32_t		proxmin = UINT32_MAX;
	ACPI_MSCT_PROXIMITY	*item, *end;

	if (tp == NULL || lgrp_plat_msct_enable == 0)
		return (-1);

	if (tp->MaxProximityDomains >= MAX_NODES) {
		cmn_err(CE_CONT,
		    "?lgrp: too many proximity domains (%d), max %d supported, "
		    "disable support of CPU/memory DR operations.",
		    tp->MaxProximityDomains + 1, MAX_NODES);
		plat_dr_disable_cpu();
		plat_dr_disable_memory();
		return (-1);
	}

	if (prox_domain_min != NULL) {
		end = (void *)(tp->Header.Length + (uintptr_t)tp);
		for (item = (void *)((uintptr_t)tp +
		    tp->ProximityOffset); item < end;
		    item = (void *)(item->Length + (uintptr_t)item)) {
			if (item->RangeStart < proxmin) {
				proxmin = item->RangeStart;
			}

			last_seen = item->RangeEnd - item->RangeStart + 1;
			/*
			 * Break out if all proximity domains have been
			 * processed.  Some BIOSes may have unused items
			 * at the end of MSCT table.
			 */
			if (last_seen > tp->MaxProximityDomains) {
				break;
			}
		}
		*prox_domain_min = proxmin;
	}

	return (tp->MaxProximityDomains + 1);
}
/*
 * Set lgroup latencies for 2 level lgroup topology
 */
static void
lgrp_plat_2level_setup(lgrp_plat_latency_stats_t *lat_stats)
{
	int	i, j;

	ASSERT(lat_stats != NULL);

	if (lgrp_plat_node_cnt >= 4)
		cmn_err(CE_NOTE,
		    "MPO only optimizing for local and remote\n");
	for (i = 0; i < lgrp_plat_node_cnt; i++) {
		for (j = 0; j < lgrp_plat_node_cnt; j++) {
			if (i == j)
				lat_stats->latencies[i][j] = 2;
			else
				lat_stats->latencies[i][j] = 3;
		}
	}
	lat_stats->latency_min = 2;
	lat_stats->latency_max = 3;
	/* TODO: check it. */
	lgrp_config(LGRP_CONFIG_FLATTEN, 2, 0);
	lgrp_plat_topo_flatten = 1;
}
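/*
 * Worked example (illustrative): with lgrp_plat_node_cnt = 4, the loops
 * above produce a 4x4 latency matrix whose diagonal entries are all 2 and
 * whose off diagonal entries are all 3, so every remote node appears
 * equally distant and the resulting lgroup topology collapses to exactly
 * two levels (local and remote).
 */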
/*
 * The following Opteron specific constants, macros, types, and routines define
 * PCI configuration space registers and how to read them to determine the NUMA
 * configuration of *supported* Opteron processors.  They provide the same
 * information that may be gotten from the ACPI System Resource Affinity Table
 * (SRAT) if it exists on the machine of interest.
 *
 * The AMD BIOS and Kernel Developer's Guide (BKDG) for the processor family
 * of interest describes all of these registers and their contents.  The main
 * registers used by this code to determine the NUMA configuration of the
 * machine are the node ID register for the number of NUMA nodes and the DRAM
 * address map registers for the physical address range of each node.
 *
 * NOTE: The format and how to determine the NUMA configuration using PCI
 *	 config space registers may change or may not be supported in future
 *	 Opteron processor families.
 */

/*
 * How many bits to shift Opteron DRAM Address Map base and limit registers
 * to get actual value
 */
#define	OPT_DRAMADDR_HI_LSHIFT_ADDR	40	/* shift left for address */
#define	OPT_DRAMADDR_LO_LSHIFT_ADDR	8	/* shift left for address */

#define	OPT_DRAMADDR_HI_MASK_ADDR	0x000000FF	/* address bits 47-40 */
#define	OPT_DRAMADDR_LO_MASK_ADDR	0xFFFF0000	/* address bits 39-24 */

#define	OPT_DRAMADDR_LO_MASK_OFF	0xFFFFFF	/* offset for address */

/*
 * Macros to derive addresses from Opteron DRAM Address Map registers
 */
#define	OPT_DRAMADDR_HI(reg) \
	(((u_longlong_t)reg & OPT_DRAMADDR_HI_MASK_ADDR) << \
	    OPT_DRAMADDR_HI_LSHIFT_ADDR)

#define	OPT_DRAMADDR_LO(reg) \
	(((u_longlong_t)reg & OPT_DRAMADDR_LO_MASK_ADDR) << \
	    OPT_DRAMADDR_LO_LSHIFT_ADDR)

#define	OPT_DRAMADDR(high, low) \
	(OPT_DRAMADDR_HI(high) | OPT_DRAMADDR_LO(low))
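/*
 * Worked example (illustrative register values): for base_hi = 0x01 and
 * base_lo = 0x00010003, OPT_DRAMADDR_HI(0x01) = 0x1 << 40 = 0x10000000000
 * and OPT_DRAMADDR_LO(0x00010003) = 0x00010000 << 8 = 0x1000000, so
 * OPT_DRAMADDR(0x01, 0x00010003) = 0x10001000000.  The low bits of
 * base_lo (the read/write enable and interleave fields) are masked off
 * and never contribute to the address.
 */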
/*
 * Bit masks defining what's in Opteron DRAM Address Map base register
 */
#define	OPT_DRAMBASE_LO_MASK_RE		0x1	/* read enable */
#define	OPT_DRAMBASE_LO_MASK_WE		0x2	/* write enable */
#define	OPT_DRAMBASE_LO_MASK_INTRLVEN	0x700	/* interleave */

/*
 * Bit masks defining what's in Opteron DRAM Address Map limit register
 */
#define	OPT_DRAMLIMIT_LO_MASK_DSTNODE	0x7	/* destination node */
#define	OPT_DRAMLIMIT_LO_MASK_INTRLVSEL	0x700	/* interleave select */


/*
 * Opteron Node ID register in PCI configuration space contains
 * number of nodes in system, etc. for Opteron K8.  The following
 * constants and macros define its contents, structure, and access.
 */

/*
 * Bit masks defining what's in Opteron Node ID register
 */
#define	OPT_NODE_MASK_ID	0x7	/* node ID */
#define	OPT_NODE_MASK_CNT	0x70	/* node count */
#define	OPT_NODE_MASK_IONODE	0x700	/* Hypertransport I/O hub node ID */
#define	OPT_NODE_MASK_LCKNODE	0x7000	/* lock controller node ID */
#define	OPT_NODE_MASK_CPUCNT	0xF0000	/* CPUs in system (0 means 1 CPU) */

/*
 * How many bits in Opteron Node ID register to shift right to get actual value
 */
#define	OPT_NODE_RSHIFT_CNT	0x4	/* shift right for node count value */

/*
 * Macros to get values from Opteron Node ID register
 */
#define	OPT_NODE_CNT(reg) \
	((reg & OPT_NODE_MASK_CNT) >> OPT_NODE_RSHIFT_CNT)
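/*
 * Worked example (illustrative): a node ID register value of 0x31 encodes
 * node ID 1 in bits 2-0 and OPT_NODE_CNT(0x31) = (0x31 & 0x70) >> 4 = 3,
 * which opt_get_numa_config() below turns into 4 nodes by adding 1.
 */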
/*
 * Macro to setup PCI Extended Configuration Space (ECS) address to give to
 * "in/out" instructions
 *
 * NOTE: Should only be used in lgrp_plat_init() before MMIO setup because any
 *	 other uses should just do MMIO to access PCI ECS.
 *	 Must enable special bit in Northbridge Configuration Register on
 *	 Greyhound for extended CF8 space access to be able to access PCI ECS
 *	 using "in/out" instructions and restore special bit after done
 *	 accessing PCI ECS.
 */
#define	OPT_PCI_ECS_ADDR(bus, device, function, reg) \
	(PCI_CONE | (((bus) & 0xff) << 16) | (((device) & 0x1f) << 11) | \
	    (((function) & 0x7) << 8) | ((reg) & 0xfc) | \
	    ((((reg) >> 8) & 0xf) << 24))
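/*
 * Worked example (illustrative): OPT_PCI_ECS_ADDR(0, 24, 1, 0x140) places
 * bus 0 in bits 23-16, device 24 (0x18) in bits 15-11, function 1 in bits
 * 10-8, the low register byte (0x140 & 0xfc = 0x40) in bits 7-2, and the
 * high register nibble ((0x140 >> 8) & 0xf = 0x1) in bits 27-24, yielding
 * PCI_CONE | 0x0100C140.
 */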
/*
 * PCI configuration space registers accessed by specifying
 * a bus, device, function, and offset.  The following constants
 * define the values needed to access Opteron K8 configuration
 * info to determine its node topology
 */

#define	OPT_PCS_BUS_CONFIG	0	/* Hypertransport config space bus */

/*
 * Opteron PCI configuration space register function values
 */
#define	OPT_PCS_FUNC_HT		0	/* Hypertransport configuration */
#define	OPT_PCS_FUNC_ADDRMAP	1	/* Address map configuration */
#define	OPT_PCS_FUNC_DRAM	2	/* DRAM configuration */
#define	OPT_PCS_FUNC_MISC	3	/* Miscellaneous configuration */

/*
 * PCI Configuration Space register offsets
 */
#define	OPT_PCS_OFF_VENDOR	0x0	/* device/vendor ID register */
#define	OPT_PCS_OFF_DRAMBASE_HI	0x140	/* DRAM Base register (node 0) */
#define	OPT_PCS_OFF_DRAMBASE_LO	0x40	/* DRAM Base register (node 0) */
#define	OPT_PCS_OFF_NODEID	0x60	/* Node ID register */

/*
 * Opteron PCI Configuration Space device IDs for nodes
 */
#define	OPT_PCS_DEV_NODE0	24	/* device number for node 0 */


/*
 * Opteron DRAM address map gives base and limit for physical memory in a node
 */
typedef	struct opt_dram_addr_map {
	uint32_t	base_hi;
	uint32_t	base_lo;
	uint32_t	limit_hi;
	uint32_t	limit_lo;
} opt_dram_addr_map_t;


/*
 * Supported AMD processor families
 */
#define	AMD_FAMILY_HAMMER	15
#define	AMD_FAMILY_GREYHOUND	16

/*
 * Whether to have is_opteron() return 1 even when processor isn't supported
 */
uint_t	is_opteron_override = 0;

/*
 * AMD processor family for current CPU
 */
uint_t	opt_family = 0;
/*
 * Determine whether we're running on a supported AMD Opteron since reading
 * node count and DRAM address map registers may have different format or
 * may not be supported across processor families
 */
static int
is_opteron(void)
{

	if (x86_vendor != X86_VENDOR_AMD)
		return (0);

	opt_family = cpuid_getfamily(CPU);
	if (opt_family == AMD_FAMILY_HAMMER ||
	    opt_family == AMD_FAMILY_GREYHOUND || is_opteron_override)
		return (1);
	else
		return (0);
}
/*
 * Determine NUMA configuration for Opteron from registers that live in PCI
 * configuration space
 */
static void
opt_get_numa_config(uint_t *node_cnt, int *mem_intrlv,
    memnode_phys_addr_map_t *memnode_info)
{
	uint_t				bus;
	uint_t				dev;
	struct opt_dram_addr_map	dram_map[MAX_NODES];
	uint_t				node;
	uint_t				node_info[MAX_NODES];
	uint_t				off_hi;
	uint_t				off_lo;
	uint64_t			nb_cfg_reg;

	/*
	 * Read configuration registers from PCI configuration space to
	 * determine node information, which memory is in each node, etc.
	 *
	 * Write to PCI configuration space address register to specify
	 * which configuration register to read and read/write PCI
	 * configuration space data register to get/set contents
	 */
	bus = OPT_PCS_BUS_CONFIG;
	dev = OPT_PCS_DEV_NODE0;
	off_hi = OPT_PCS_OFF_DRAMBASE_HI;
	off_lo = OPT_PCS_OFF_DRAMBASE_LO;

	/*
	 * Read node ID register for node 0 to get node count
	 */
	node_info[0] = pci_getl_func(bus, dev, OPT_PCS_FUNC_HT,
	    OPT_PCS_OFF_NODEID);
	*node_cnt = OPT_NODE_CNT(node_info[0]) + 1;

	/*
	 * If number of nodes is more than maximum supported, then set node
	 * count to 1 and treat system as UMA instead of NUMA.
	 */
	if (*node_cnt > MAX_NODES) {
		*node_cnt = 1;
		return;
	}

	/*
	 * For Greyhound, PCI Extended Configuration Space must be enabled to
	 * read high DRAM address map base and limit registers
	 */
	if (opt_family == AMD_FAMILY_GREYHOUND) {
		nb_cfg_reg = rdmsr(MSR_AMD_NB_CFG);
		if ((nb_cfg_reg & AMD_GH_NB_CFG_EN_ECS) == 0)
			wrmsr(MSR_AMD_NB_CFG,
			    nb_cfg_reg | AMD_GH_NB_CFG_EN_ECS);
	}

	for (node = 0; node < *node_cnt; node++) {
		uint32_t	base_hi;
		uint32_t	base_lo;
		uint32_t	limit_hi;
		uint32_t	limit_lo;

		/*
		 * Read node ID register (except for node 0 which we just read)
		 */
		if (node > 0) {
			node_info[node] = pci_getl_func(bus, dev,
			    OPT_PCS_FUNC_HT, OPT_PCS_OFF_NODEID);
		}

		/*
		 * Read DRAM base and limit registers which specify
		 * physical memory range of each node
		 */
		if (opt_family != AMD_FAMILY_GREYHOUND)
			base_hi = 0;
		else {
			outl(PCI_CONFADD, OPT_PCI_ECS_ADDR(bus, dev,
			    OPT_PCS_FUNC_ADDRMAP, off_hi));
			base_hi = dram_map[node].base_hi =
			    inl(PCI_CONFDATA);
		}
		base_lo = dram_map[node].base_lo = pci_getl_func(bus, dev,
		    OPT_PCS_FUNC_ADDRMAP, off_lo);

		if ((dram_map[node].base_lo & OPT_DRAMBASE_LO_MASK_INTRLVEN) &&
		    mem_intrlv)
			*mem_intrlv = *mem_intrlv + 1;

		off_hi += 4;	/* high limit register offset */
		if (opt_family != AMD_FAMILY_GREYHOUND)
			limit_hi = 0;
		else {
			outl(PCI_CONFADD, OPT_PCI_ECS_ADDR(bus, dev,
			    OPT_PCS_FUNC_ADDRMAP, off_hi));
			limit_hi = dram_map[node].limit_hi =
			    inl(PCI_CONFDATA);
		}

		off_lo += 4;	/* low limit register offset */
		limit_lo = dram_map[node].limit_lo = pci_getl_func(bus,
		    dev, OPT_PCS_FUNC_ADDRMAP, off_lo);

		/*
		 * Increment device number to next node and register offsets
		 * for DRAM base register of next node
		 */
		off_hi += 4;
		off_lo += 4;
		dev++;

		/*
		 * Both read and write enable bits must be enabled in DRAM
		 * address map base register for physical memory to exist in
		 * node
		 */
		if ((base_lo & OPT_DRAMBASE_LO_MASK_RE) == 0 ||
		    (base_lo & OPT_DRAMBASE_LO_MASK_WE) == 0) {
			/*
			 * Mark node memory as non-existent and set start and
			 * end addresses to be same in memnode_info[]
			 */
			memnode_info[node].exists = 0;
			memnode_info[node].start = memnode_info[node].end =
			    (pfn_t)-1;
			continue;
		}

		/*
		 * Mark node memory as existing and remember physical address
		 * range of each node for use later
		 */
		memnode_info[node].exists = 1;

		memnode_info[node].start = btop(OPT_DRAMADDR(base_hi, base_lo));

		memnode_info[node].end = btop(OPT_DRAMADDR(limit_hi, limit_lo) |
		    OPT_DRAMADDR_LO_MASK_OFF);
	}

	/*
	 * Restore PCI Extended Configuration Space enable bit
	 */
	if (opt_family == AMD_FAMILY_GREYHOUND) {
		if ((nb_cfg_reg & AMD_GH_NB_CFG_EN_ECS) == 0)
			wrmsr(MSR_AMD_NB_CFG, nb_cfg_reg);
	}
}
/*
 * Return average amount of time to read vendor ID register on Northbridge
 * N times on specified destination node from current CPU
 */
static hrtime_t
opt_probe_vendor(int dest_node, int nreads)
{
	int		cnt;
	uint_t		dev;
	/* LINTED: set but not used in function */
	volatile uint_t	dev_vendor;
	hrtime_t	elapsed;
	hrtime_t	end;
	int		ipl;
	hrtime_t	start;

	dev = OPT_PCS_DEV_NODE0 + dest_node;
	kpreempt_disable();
	ipl = spl8();
	outl(PCI_CONFADD, PCI_CADDR1(0, dev, OPT_PCS_FUNC_DRAM,
	    OPT_PCS_OFF_VENDOR));
	start = gethrtime();
	for (cnt = 0; cnt < nreads; cnt++)
		dev_vendor = inl(PCI_CONFDATA);
	end = gethrtime();
	elapsed = (end - start) / nreads;
	splx(ipl);
	kpreempt_enable();
	return (elapsed);
}