Revert "kqueue: Return value of knote_release is no longer useful."
sys/kern/subr_cpu_topology.c
/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/sbuf.h>
#include <sys/cpu_topology.h>

#include <machine/smp.h>

#ifndef NAPICID
#define NAPICID 256
#endif
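
/*
 * INDENT_BUF_SIZE sizes the indentation scratch buffer used by the
 * hw.cpu_topology.tree handler (three bytes per topology level);
 * INVALID_ID marks an absent physical or core id in the per-cpu info.
 */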
#define INDENT_BUF_SIZE LEVEL_NO*3
#define INVALID_ID -1

/* Per-cpu sysctl nodes and info */
struct per_cpu_sysctl_info {
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	char cpu_name[32];
	int physical_id;
	int core_id;
	char physical_siblings[8*MAXCPU];
	char core_siblings[8*MAXCPU];
};
typedef struct per_cpu_sysctl_info per_cpu_sysctl_info_t;

static cpu_node_t cpu_topology_nodes[MAXCPU];	/* Memory for topology */
static cpu_node_t *cpu_root_node;		/* Root node pointer */

static struct sysctl_ctx_list cpu_topology_sysctl_ctx;
static struct sysctl_oid *cpu_topology_sysctl_tree;
static char cpu_topology_members[8*MAXCPU];
static per_cpu_sysctl_info_t *pcpu_sysctl;
static void sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask);

int cpu_topology_levels_number = 1;
cpu_node_t *root_cpu_node;

MALLOC_DEFINE(M_PCPUSYS, "pcpusys", "pcpu sysctl topology");

/* Get the next valid apicid starting
 * from the current apicid (curr_apicid).
 */
static int
get_next_valid_apicid(int curr_apicid)
{
	int next_apicid = curr_apicid;
	do {
		next_apicid++;
	}
	while(get_cpuid_from_apicid(next_apicid) == -1 &&
	    next_apicid < NAPICID);
	if (next_apicid == NAPICID) {
		kprintf("Warning: No next valid APICID found. Returning -1\n");
		return -1;
	}
	return next_apicid;
}

/* Generic topology tree. The parameters have the following meaning:
 * - children_no_per_level : the number of children on each level
 * - level_types : the type of the level (THREAD, CORE, CHIP, etc)
 * - cur_level : the current level of the tree
 * - node : the current node
 * - last_free_node : the last free node in the global array.
 * - apicid : basically these are the ids of the leaves
 */
static void
build_topology_tree(int *children_no_per_level,
    uint8_t *level_types,
    int cur_level,
    cpu_node_t *node,
    cpu_node_t **last_free_node,
    int *apicid)
{
	int i;

	node->child_no = children_no_per_level[cur_level];
	node->type = level_types[cur_level];
	CPUMASK_ASSZERO(node->members);
	node->compute_unit_id = -1;

	if (node->child_no == 0) {
		*apicid = get_next_valid_apicid(*apicid);
		CPUMASK_ASSBIT(node->members, get_cpuid_from_apicid(*apicid));
		return;
	}

	if (node->parent_node == NULL)
		root_cpu_node = node;

	for (i = 0; i < node->child_no; i++) {
		node->child_node[i] = *last_free_node;
		(*last_free_node)++;

		node->child_node[i]->parent_node = node;

		build_topology_tree(children_no_per_level,
		    level_types,
		    cur_level + 1,
		    node->child_node[i],
		    last_free_node,
		    apicid);

		CPUMASK_ORMASK(node->members, node->child_node[i]->members);
	}
}

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
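/* Remove the array element at index pos by shifting later entries left */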
static void
migrate_elements(cpu_node_t **a, int n, int pos)
{
	int i;

	for (i = pos; i < n - 1 ; i++) {
		a[i] = a[i+1];
	}
	a[i] = NULL;
}
#endif

/* Build CPU topology. The detection is made by comparing the
 * chip, core and logical IDs of each CPU with the IDs of the
 * BSP. When we find a match, at that level the CPUs are siblings.
 */
static void
build_cpu_topology(void)
{
	detect_cpu_topology();
	int i;
	int BSPID = 0;
	int threads_per_core = 0;
	int cores_per_chip = 0;
	int chips_per_package = 0;
	int children_no_per_level[LEVEL_NO];
	uint8_t level_types[LEVEL_NO];
	int apicid = -1;

	cpu_node_t *root = &cpu_topology_nodes[0];
	cpu_node_t *last_free_node = root + 1;

	/* Assume that the topology is uniform.
	 * Find the number of siblings within chip
	 * and within core to build up the topology.
	 */
	for (i = 0; i < ncpus; i++) {
		cpumask_t mask;

		CPUMASK_ASSBIT(mask, i);

		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
			continue;

		if (get_chip_ID(BSPID) == get_chip_ID(i))
			cores_per_chip++;
		else
			continue;

		if (get_core_number_within_chip(BSPID) ==
		    get_core_number_within_chip(i))
			threads_per_core++;
	}

	/*
	 * The loop above counted every logical cpu on the BSP's chip in
	 * cores_per_chip; divide by threads_per_core to get real cores.
	 */
	cores_per_chip /= threads_per_core;
	chips_per_package = ncpus / (cores_per_chip * threads_per_core);

	if (bootverbose)
		kprintf("CPU Topology: cores_per_chip: %d; threads_per_core: %d; chips_per_package: %d;\n",
		    cores_per_chip, threads_per_core, chips_per_package);

	if (threads_per_core > 1) { /* HT available - 4 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = threads_per_core;
		children_no_per_level[3] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;
		level_types[3] = THREAD_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 4;

	} else if (cores_per_chip > 1) { /* No HT available - 3 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 3;

	} else { /* No HT and no Multi-Core - 2 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 2;
	}

	cpu_root_node = root;
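
	/*
	 * Some AMD CPUs pair integer cores into shared compute units.
	 * When fix_amd_topology() succeeds, regroup cores that share a
	 * compute_unit_id under a new CORE_LEVEL node and demote the
	 * grouped leaves to THREAD_LEVEL.
	 */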
#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	if (fix_amd_topology() == 0) {
		int visited[MAXCPU], i, j, pos, cpuid;
		cpu_node_t *leaf, *parent;

		bzero(visited, MAXCPU * sizeof(int));

		for (i = 0; i < ncpus; i++) {
			if (visited[i] == 0) {
				pos = 0;
				visited[i] = 1;
				leaf = get_cpu_node_by_cpuid(i);

				if (leaf->type == CORE_LEVEL) {
					parent = leaf->parent_node;

					last_free_node->child_node[0] = leaf;
					last_free_node->child_no = 1;
					last_free_node->members = leaf->members;
					last_free_node->compute_unit_id = leaf->compute_unit_id;
					last_free_node->parent_node = parent;
					last_free_node->type = CORE_LEVEL;

					for (j = 0; j < parent->child_no; j++) {
						if (parent->child_node[j] != leaf) {

							cpuid = BSFCPUMASK(parent->child_node[j]->members);
							if (visited[cpuid] == 0 &&
							    parent->child_node[j]->compute_unit_id == leaf->compute_unit_id) {

								last_free_node->child_node[last_free_node->child_no] = parent->child_node[j];
								last_free_node->child_no++;
								CPUMASK_ORMASK(last_free_node->members, parent->child_node[j]->members);

								parent->child_node[j]->type = THREAD_LEVEL;
								parent->child_node[j]->parent_node = last_free_node;
								visited[cpuid] = 1;

								migrate_elements(parent->child_node, parent->child_no, j);
								parent->child_no--;
								j--;
							}
						} else {
							pos = j;
						}
					}
					if (last_free_node->child_no > 1) {
						parent->child_node[pos] = last_free_node;
						leaf->type = THREAD_LEVEL;
						leaf->parent_node = last_free_node;
						last_free_node++;
					}
				}
			}
		}
	}
#endif
}

/* Recursive function helper to print the CPU topology tree */
static void
print_cpu_topology_tree_sysctl_helper(cpu_node_t *node,
    struct sbuf *sb,
    char * buf,
    int buf_len,
    int last)
{
	int i;
	int bsr_member;

	sbuf_bcat(sb, buf, buf_len);
	if (last) {
		sbuf_printf(sb, "\\-");
		buf[buf_len] = ' ';buf_len++;
		buf[buf_len] = ' ';buf_len++;
	} else {
		sbuf_printf(sb, "|-");
		buf[buf_len] = '|';buf_len++;
		buf[buf_len] = ' ';buf_len++;
	}

	bsr_member = BSRCPUMASK(node->members);

	if (node->type == PACKAGE_LEVEL) {
		sbuf_printf(sb,"PACKAGE MEMBERS: ");
	} else if (node->type == CHIP_LEVEL) {
		sbuf_printf(sb,"CHIP ID %d: ",
		    get_chip_ID(bsr_member));
	} else if (node->type == CORE_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"Compute Unit ID %d: ",
			    node->compute_unit_id);
		} else {
			sbuf_printf(sb,"CORE ID %d: ",
			    get_core_number_within_chip(bsr_member));
		}
	} else if (node->type == THREAD_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"CORE ID %d: ",
			    get_core_number_within_chip(bsr_member));
		} else {
			sbuf_printf(sb,"THREAD ID %d: ",
			    get_logical_CPU_number_within_core(bsr_member));
		}
	} else {
		sbuf_printf(sb,"UNKNOWN: ");
	}
	sbuf_print_cpuset(sb, &node->members);
	sbuf_printf(sb,"\n");

	for (i = 0; i < node->child_no; i++) {
		print_cpu_topology_tree_sysctl_helper(node->child_node[i],
		    sb, buf, buf_len, i == (node->child_no -1));
	}
}

/* SYSCTL PROCEDURE for printing the CPU Topology tree */
static int
print_cpu_topology_tree_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;
	char buf[INDENT_BUF_SIZE];

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL) {
		return (ENOMEM);
	}
	sbuf_printf(sb,"\n");
	print_cpu_topology_tree_sysctl_helper(cpu_root_node, sb, buf, 0, 1);

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* SYSCTL PROCEDURE for printing the CPU Topology level description */
static int
print_cpu_topology_level_description_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	if (cpu_topology_levels_number == 4) /* HT available */
		sbuf_printf(sb, "0 - thread; 1 - core; 2 - socket; 3 - anything");
	else if (cpu_topology_levels_number == 3) /* No HT available */
		sbuf_printf(sb, "0 - core; 1 - socket; 2 - anything");
	else if (cpu_topology_levels_number == 2) /* No HT and no Multi-Core */
		sbuf_printf(sb, "0 - socket; 1 - anything");
	else
		sbuf_printf(sb, "Unknown");

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* Find a cpu_node_t by a mask */
static cpu_node_t *
get_cpu_node_by_cpumask(cpu_node_t * node,
			cpumask_t mask) {

	cpu_node_t * found = NULL;
	int i;

	if (CPUMASK_CMPMASKEQ(node->members, mask))
		return node;

	for (i = 0; i < node->child_no; i++) {
		found = get_cpu_node_by_cpumask(node->child_node[i], mask);
		if (found != NULL) {
			return found;
		}
	}
	return NULL;
}

cpu_node_t *
get_cpu_node_by_cpuid(int cpuid) {
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	return get_cpu_node_by_cpumask(cpu_root_node, mask);
}

/* Get the mask of siblings for level_type of a cpuid */
cpumask_t
get_cpumask_from_level(int cpuid,
			uint8_t level_type)
{
	cpu_node_t * node;
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	node = get_cpu_node_by_cpumask(cpu_root_node, mask);

	if (node == NULL) {
		CPUMASK_ASSZERO(mask);
		return mask;
	}

	while (node != NULL) {
		if (node->type == level_type) {
			return node->members;
		}
		node = node->parent_node;
	}
	CPUMASK_ASSZERO(mask);

	return mask;
}
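
/* Depth-first search helper: find the CHIP_LEVEL node owning chip_id */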
static const cpu_node_t *
get_cpu_node_by_chipid2(const cpu_node_t *node, int chip_id)
{
	int cpuid;

	if (node->type != CHIP_LEVEL) {
		const cpu_node_t *ret = NULL;
		int i;

		for (i = 0; i < node->child_no; ++i) {
			ret = get_cpu_node_by_chipid2(node->child_node[i],
			    chip_id);
			if (ret != NULL)
				break;
		}
		return ret;
	}

	cpuid = BSRCPUMASK(node->members);
	if (get_chip_ID(cpuid) == chip_id)
		return node;
	return NULL;
}

const cpu_node_t *
get_cpu_node_by_chipid(int chip_id)
{
	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));
	return get_cpu_node_by_chipid2(cpu_root_node, chip_id);
}

/* init pcpu_sysctl structure info */
static void
init_pcpu_topology_sysctl(void)
{
	int i;
	cpumask_t mask;
	struct sbuf sb;

	pcpu_sysctl = kmalloc(sizeof(*pcpu_sysctl) * MAXCPU, M_PCPUSYS,
			      M_INTWAIT | M_ZERO);

	for (i = 0; i < ncpus; i++) {
		sbuf_new(&sb, pcpu_sysctl[i].cpu_name,
		    sizeof(pcpu_sysctl[i].cpu_name), SBUF_FIXEDLEN);
		sbuf_printf(&sb,"cpu%d", i);
		sbuf_finish(&sb);

		/* Get physical siblings */
		mask = get_cpumask_from_level(i, CHIP_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].physical_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].physical_siblings,
		    sizeof(pcpu_sysctl[i].physical_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].physical_id = get_chip_ID(i);

		/* Get core siblings */
		mask = get_cpumask_from_level(i, CORE_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].core_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].core_siblings,
		    sizeof(pcpu_sysctl[i].core_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].core_id = get_core_number_within_chip(i);
	}
}

/* Build SYSCTL structure for revealing
 * the CPU Topology to user-space.
 */
static void
build_sysctl_cpu_topology(void)
{
	int i;
	struct sbuf sb;

	/* SYSCTL new leaf for "cpu_topology" */
	sysctl_ctx_init(&cpu_topology_sysctl_ctx);
	cpu_topology_sysctl_tree = SYSCTL_ADD_NODE(&cpu_topology_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    "cpu_topology",
	    CTLFLAG_RD, 0, "");

	/* SYSCTL cpu_topology "tree" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "tree", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_tree_sysctl, "A",
	    "Tree print of CPU topology");

	/* SYSCTL cpu_topology "level_description" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "level_description", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_level_description_sysctl, "A",
	    "Level description of CPU topology");

	/* SYSCTL cpu_topology "members" entry */
	sbuf_new(&sb, cpu_topology_members,
	    sizeof(cpu_topology_members), SBUF_FIXEDLEN);
	sbuf_print_cpuset(&sb, &cpu_root_node->members);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	SYSCTL_ADD_STRING(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "members", CTLFLAG_RD,
	    cpu_topology_members, 0,
	    "Members of the CPU Topology");

	/* SYSCTL per_cpu info */
	for (i = 0; i < ncpus; i++) {
		/* New leaf : hw.cpu_topology.cpux */
		sysctl_ctx_init(&pcpu_sysctl[i].sysctl_ctx);
		pcpu_sysctl[i].sysctl_tree = SYSCTL_ADD_NODE(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
		    OID_AUTO,
		    pcpu_sysctl[i].cpu_name,
		    CTLFLAG_RD, 0, "");

		/* Check if the physical_id found is valid */
		if (pcpu_sysctl[i].physical_id == INVALID_ID) {
			continue;
		}

		/* Add physical id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].physical_id, 0,
		    "Physical ID");

		/* Add physical siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].physical_siblings, 0,
		    "Physical siblings");

		/* Check if the core_id found is valid */
		if (pcpu_sysctl[i].core_id == INVALID_ID) {
			continue;
		}

		/* Add core id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].core_id, 0,
		    "Core ID");

		/* Add core siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].core_siblings, 0,
		    "Core siblings");
	}
}
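
/*
 * Format a cpumask as "cpus(0-3, 6, 8-11) ", collapsing runs of
 * consecutive cpu ids into ranges.
 */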
static
void
sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask)
{
	int i;
	int b = -1;
	int e = -1;
	int more = 0;

	sbuf_printf(sb, "cpus(");
	CPUSET_FOREACH(i, *mask) {
		if (b < 0) {
			b = i;
			e = b + 1;
			continue;
		}
		if (e == i) {
			++e;
			continue;
		}
		if (more)
			sbuf_printf(sb, ", ");
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
		more = 1;
		b = i;
		e = b + 1;
	}
	if (more)
		sbuf_printf(sb, ", ");
	if (b >= 0) {
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
	}
	sbuf_printf(sb, ") ");
}

/* Build the CPU Topology and SYSCTL Topology tree */
static void
init_cpu_topology(void)
{
	build_cpu_topology();

	init_pcpu_topology_sysctl();
	build_sysctl_cpu_topology();
}
SYSINIT(cpu_topology, SI_BOOT2_CPU_TOPOLOGY, SI_ORDER_FIRST,
    init_cpu_topology, NULL);