/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/lmb.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
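/* The image the hypervisor hands us is laid out as the 16-byte aligned
 * header below, followed by the node block, the name block, and finally
 * the data block (see node_block(), name_block() and data_block() later
 * in this file).
 */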
struct mdesc_hdr {
        u32     version;        /* Transport version */
        u32     node_sz;        /* node block size */
        u32     name_sz;        /* name block size */
        u32     data_sz;        /* data block size */
} __attribute__((aligned(16)));

struct mdesc_elem {
        u8      tag;
#define MD_LIST_END     0x00
#define MD_NODE         0x4e
#define MD_NODE_END     0x45
#define MD_NOOP         0x20
#define MD_PROP_ARC     0x61
#define MD_PROP_VAL     0x76
#define MD_PROP_STR     0x73
#define MD_PROP_DATA    0x64
        u8      name_len;
        u16     resv;
        u32     name_offset;
        union {
                struct {
                        u32     data_len;
                        u32     data_offset;
                } data;
                u64     val;
        } d;
};

struct mdesc_mem_ops {
        struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
        void (*free)(struct mdesc_handle *handle);
};

struct mdesc_handle {
        struct list_head        list;
        struct mdesc_mem_ops    *mops;
        void                    *self_base;
        atomic_t                refcnt;
        unsigned int            handle_size;
        struct mdesc_hdr        mdesc;
};

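/* A handle's lifetime is governed by the embedded refcount, the list
 * linkage (used for the zombie list of superseded MDs), and the mem ops
 * that know how to free the underlying allocation.  Every handle starts
 * out with one reference held by its allocator.
 */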
static void mdesc_handle_init(struct mdesc_handle *hp,
                              unsigned int handle_size,
                              void *base)
{
        BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

        memset(hp, 0, handle_size);
        INIT_LIST_HEAD(&hp->list);
        hp->self_base = base;
        atomic_set(&hp->refcnt, 1);
        hp->handle_size = handle_size;
}

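/* Two allocators back mdesc_handle objects: the LMB-based one serves the
 * machine description grabbed at early boot, while the kmalloc-based one
 * serves MD updates delivered once the normal allocators are available.
 */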
static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
{
        unsigned int handle_size, alloc_size;
        struct mdesc_handle *hp;
        unsigned long paddr;

        handle_size = (sizeof(struct mdesc_handle) -
                       sizeof(struct mdesc_hdr) +
                       mdesc_size);
        alloc_size = PAGE_ALIGN(handle_size);

        paddr = lmb_alloc(alloc_size, PAGE_SIZE);

        hp = NULL;
        if (paddr) {
                hp = __va(paddr);
                mdesc_handle_init(hp, handle_size, hp);
        }
        return hp;
}

static void mdesc_lmb_free(struct mdesc_handle *hp)
{
        unsigned int alloc_size, handle_size = hp->handle_size;
        unsigned long start, end;

        BUG_ON(atomic_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));

        alloc_size = PAGE_ALIGN(handle_size);

        start = (unsigned long) hp;
        end = start + alloc_size;

        while (start < end) {
                struct page *p;

                p = virt_to_page(start);
                ClearPageReserved(p);
                __free_page(p);
                start += PAGE_SIZE;
        }
}

static struct mdesc_mem_ops lmb_mdesc_ops = {
        .alloc = mdesc_lmb_alloc,
        .free  = mdesc_lmb_free,
};

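/* The kmalloc variant over-allocates by 15 bytes so the embedded mdesc_hdr
 * can be pushed up to a 16-byte boundary; the original pointer is kept in
 * self_base so mdesc_kfree() can return exactly what kmalloc handed out.
 */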
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
        unsigned int handle_size;
        void *base;

        handle_size = (sizeof(struct mdesc_handle) -
                       sizeof(struct mdesc_hdr) +
                       mdesc_size);

        base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
        if (base) {
                struct mdesc_handle *hp;
                unsigned long addr;

                addr = (unsigned long)base;
                addr = (addr + 15UL) & ~15UL;
                hp = (struct mdesc_handle *) addr;

                mdesc_handle_init(hp, handle_size, base);
                return hp;
        }

        return NULL;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
        BUG_ON(atomic_read(&hp->refcnt) != 0);
        BUG_ON(!list_empty(&hp->list));

        kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
        .alloc = mdesc_kmalloc,
        .free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
                                        struct mdesc_mem_ops *mops)
{
        struct mdesc_handle *hp = mops->alloc(mdesc_size);

        if (hp)
                hp->mops = mops;

        return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
        hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

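/* Readers take a reference on the current MD with mdesc_grab() and drop
 * it with mdesc_release(); the final put unlinks the handle and frees it
 * through its mem ops.
 */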
struct mdesc_handle *mdesc_grab(void)
{
        struct mdesc_handle *hp;
        unsigned long flags;

        spin_lock_irqsave(&mdesc_lock, flags);
        hp = cur_mdesc;
        if (hp)
                atomic_inc(&hp->refcnt);
        spin_unlock_irqrestore(&mdesc_lock, flags);

        return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
        unsigned long flags;

        spin_lock_irqsave(&mdesc_lock, flags);
        if (atomic_dec_and_test(&hp->refcnt)) {
                list_del_init(&hp->list);
                hp->mops->free(hp);
        }
        spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

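/* Notifier clients are told when nodes of a given name appear in or
 * disappear from the MD across updates.  Registering a client immediately
 * replays ->add() for every matching node in the current MD.
 *
 * Illustrative sketch only (hypothetical client, not part of this file):
 *
 *      static struct mdesc_notifier_client my_client = {
 *              .add            = my_node_add,
 *              .remove         = my_node_remove,
 *              .node_name      = "virtual-device-port",
 *      };
 *
 *      mdesc_register_notifier(&my_client);
 */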
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
        u64 node;

        mutex_lock(&mdesc_mutex);
        client->next = client_list;
        client_list = client;

        mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
                client->add(cur_mdesc, node);

        mutex_unlock(&mdesc_mutex);
}

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
        const u64 *id;
        u64 a;

        id = NULL;
        mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
                u64 target;

                target = mdesc_arc_target(hp, a);
                id = mdesc_get_property(hp, target,
                                        "cfg-handle", NULL);
                if (id)
                        break;
        }

        return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
                              struct mdesc_handle *a,
                              struct mdesc_handle *b,
                              void (*func)(struct mdesc_handle *, u64))
{
        u64 node;

        mdesc_for_each_node_by_name(a, node, name) {
                int found = 0, is_vdc_port = 0;
                const char *name_prop;
                const u64 *id;
                u64 fnode;

                /* vdc-port nodes are matched on their parent's cfg-handle,
                 * everything else on the node's own "id" property.
                 */
                name_prop = mdesc_get_property(a, node, "name", NULL);
                if (name_prop && !strcmp(name_prop, "vdc-port")) {
                        is_vdc_port = 1;
                        id = parent_cfg_handle(a, node);
                } else
                        id = mdesc_get_property(a, node, "id", NULL);

                if (!id) {
                        printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
                               (name_prop ? name_prop : name));
                        continue;
                }

                mdesc_for_each_node_by_name(b, fnode, name) {
                        const u64 *fid;

                        if (is_vdc_port) {
                                name_prop = mdesc_get_property(b, fnode,
                                                               "name", NULL);
                                if (!name_prop ||
                                    strcmp(name_prop, "vdc-port"))
                                        continue;
                                fid = parent_cfg_handle(b, fnode);
                                if (!fid) {
                                        printk(KERN_ERR "MD: Cannot find ID "
                                               "for vdc-port node.\n");
                                        continue;
                                }
                        } else
                                fid = mdesc_get_property(b, fnode,
                                                         "id", NULL);

                        if (*id == *fid) {
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        func(a, node);
        }
}

static void notify_one(struct mdesc_notifier_client *p,
                       struct mdesc_handle *old_hp,
                       struct mdesc_handle *new_hp)
{
        invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
        invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
                                 struct mdesc_handle *new_hp)
{
        struct mdesc_notifier_client *p = client_list;

        while (p) {
                notify_one(p, old_hp, new_hp);
                p = p->next;
        }
}

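/* Re-read the MD from the hypervisor and make it current: allocate a new
 * kmalloc-backed handle, swap it in under mdesc_lock, let the notifier
 * clients diff old against new, then drop the old handle (it is parked on
 * the zombie list if somebody still holds a reference to it).
 */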
void mdesc_update(void)
{
        unsigned long len, real_len, status;
        struct mdesc_handle *hp, *orig_hp;
        unsigned long flags;

        mutex_lock(&mdesc_mutex);

        (void) sun4v_mach_desc(0UL, 0UL, &len);

        hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
        if (!hp) {
                printk(KERN_ERR "MD: mdesc alloc fails\n");
                goto out;
        }

        status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
        if (status != HV_EOK || real_len > len) {
                printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
                       status);
                atomic_dec(&hp->refcnt);
                mdesc_free(hp);
                goto out;
        }

        spin_lock_irqsave(&mdesc_lock, flags);
        orig_hp = cur_mdesc;
        cur_mdesc = hp;
        spin_unlock_irqrestore(&mdesc_lock, flags);

        mdesc_notify_clients(orig_hp, hp);

        spin_lock_irqsave(&mdesc_lock, flags);
        if (atomic_dec_and_test(&orig_hp->refcnt))
                mdesc_free(orig_hp);
        else
                list_add(&orig_hp->list, &mdesc_zombie_list);
        spin_unlock_irqrestore(&mdesc_lock, flags);

out:
        mutex_unlock(&mdesc_mutex);
}

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
        return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
        return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
        return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

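/* The node block is an array of 16-byte mdesc_elem entries, so valid node
 * indices run from 0 to node_sz / 16.  For an MD_NODE element, d.val holds
 * the index of the next node, which is what the name-based iteration below
 * follows.
 */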
u64 mdesc_node_by_name(struct mdesc_handle *hp,
                       u64 from_node, const char *name)
{
        struct mdesc_elem *ep = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;
        u64 ret;

        if (from_node == MDESC_NODE_NULL) {
                ret = from_node = 0;
        } else if (from_node >= last_node) {
                return MDESC_NODE_NULL;
        } else {
                ret = ep[from_node].d.val;
        }

        while (ret < last_node) {
                if (ep[ret].tag != MD_NODE)
                        return MDESC_NODE_NULL;
                if (!strcmp(names + ep[ret].name_offset, name))
                        break;
                ret = ep[ret].d.val;
        }
        if (ret >= last_node)
                ret = MDESC_NODE_NULL;
        return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);

const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
                               const char *name, int *lenp)
{
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;
        void *data = data_block(&hp->mdesc);
        struct mdesc_elem *ep;

        if (node == MDESC_NODE_NULL || node >= last_node)
                return NULL;

        ep = node_block(&hp->mdesc) + node;
        ep++;
        for (; ep->tag != MD_NODE_END; ep++) {
                void *val = NULL;
                int len = 0;

                switch (ep->tag) {
                case MD_PROP_VAL:
                        val = &ep->d.val;
                        len = 8;
                        break;

                case MD_PROP_STR:
                case MD_PROP_DATA:
                        val = data + ep->d.data.data_offset;
                        len = ep->d.data.data_len;
                        break;

                default:
                        break;
                }
                if (!val)
                        continue;

                if (!strcmp(names + ep->name_offset, name)) {
                        if (lenp)
                                *lenp = len;
                        return val;
                }
        }

        return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);

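/* Arcs are MD_PROP_ARC elements stored inside a node.  mdesc_next_arc()
 * returns the element index of the next arc of the requested type
 * (MDESC_ARC_TYPE_FWD or MDESC_ARC_TYPE_BACK), and mdesc_arc_target()
 * reads the target node index out of that arc's d.val.
 */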
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;

        if (from == MDESC_NODE_NULL || from >= last_node)
                return MDESC_NODE_NULL;

        ep = base + from;

        ep++;
        for (; ep->tag != MD_NODE_END; ep++) {
                if (ep->tag != MD_PROP_ARC)
                        continue;

                if (strcmp(names + ep->name_offset, arc_type))
                        continue;

                return ep - base;
        }

        return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

        ep = base + arc;

        return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
        struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
        const char *names = name_block(&hp->mdesc);
        u64 last_node = hp->mdesc.node_sz / 16;

        if (node == MDESC_NODE_NULL || node >= last_node)
                return NULL;

        ep = base + node;
        if (ep->tag != MD_NODE)
                return NULL;

        return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);

static void __init report_platform_properties(void)
{
        struct mdesc_handle *hp = mdesc_grab();
        u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
        const char *s;
        const u64 *v;

        if (pn == MDESC_NODE_NULL) {
                prom_printf("No platform node in machine-description.\n");
                prom_halt();
        }

        s = mdesc_get_property(hp, pn, "banner-name", NULL);
        printk("PLATFORM: banner-name [%s]\n", s);
        s = mdesc_get_property(hp, pn, "name", NULL);
        printk("PLATFORM: name [%s]\n", s);

        v = mdesc_get_property(hp, pn, "hostid", NULL);
        if (v)
                printk("PLATFORM: hostid [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "serial#", NULL);
        if (v)
                printk("PLATFORM: serial# [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
        printk("PLATFORM: stick-frequency [%08llx]\n", *v);
        v = mdesc_get_property(hp, pn, "mac-address", NULL);
        if (v)
                printk("PLATFORM: mac-address [%llx]\n", *v);
        v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
        if (v)
                printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
        v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
        if (v)
                printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
        v = mdesc_get_property(hp, pn, "max-cpus", NULL);
        if (v)
                printk("PLATFORM: max-cpus [%llu]\n", *v);

#ifdef CONFIG_SMP
        {
                int max_cpu, i;

                if (v) {
                        max_cpu = *v;
                        if (max_cpu > NR_CPUS)
                                max_cpu = NR_CPUS;
                } else {
                        max_cpu = NR_CPUS;
                }
                for (i = 0; i < max_cpu; i++)
                        set_cpu_possible(i, true);
        }
#endif

        mdesc_release(hp);
}

static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
                                        struct mdesc_handle *hp,
                                        u64 mp)
{
        const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
        const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
        const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
        const char *type;
        int type_len;

        type = mdesc_get_property(hp, mp, "type", &type_len);

        switch (*level) {
        case 1:
                if (of_find_in_proplist(type, "instn", type_len)) {
                        c->icache_size = *size;
                        c->icache_line_size = *line_size;
                } else if (of_find_in_proplist(type, "data", type_len)) {
                        c->dcache_size = *size;
                        c->dcache_line_size = *line_size;
                }
                break;

        case 2:
                c->ecache_size = *size;
                c->ecache_line_size = *line_size;
                break;

        default:
                break;
        }

        if (*level == 1) {
                u64 a;

                mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
                        u64 target = mdesc_arc_target(hp, a);
                        const char *name = mdesc_node_name(hp, target);

                        if (!strcmp(name, "cache"))
                                fill_in_one_cache(c, hp, target);
                }
        }
}

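/* Core membership is recovered from the cache hierarchy: every cpu (or
 * strand reached through one more back arc) that points back at a given
 * level-1 instruction cache gets that cache's core_id.
 */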
static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
        u64 a;

        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
                u64 t = mdesc_arc_target(hp, a);
                const char *name;
                const u64 *id;

                name = mdesc_node_name(hp, t);
                if (!strcmp(name, "cpu")) {
                        id = mdesc_get_property(hp, t, "id", NULL);
                        if (*id < NR_CPUS)
                                cpu_data(*id).core_id = core_id;
                } else {
                        u64 j;

                        mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
                                u64 n = mdesc_arc_target(hp, j);
                                const char *n_name;

                                n_name = mdesc_node_name(hp, n);
                                if (strcmp(n_name, "cpu"))
                                        continue;

                                id = mdesc_get_property(hp, n, "id", NULL);
                                if (*id < NR_CPUS)
                                        cpu_data(*id).core_id = core_id;
                        }
                }
        }
}

static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
        int idx;
        u64 mp;

        idx = 1;
        mdesc_for_each_node_by_name(hp, mp, "cache") {
                const u64 *level;
                const char *type;
                int len;

                level = mdesc_get_property(hp, mp, "level", NULL);
                if (*level != 1)
                        continue;

                type = mdesc_get_property(hp, mp, "type", &len);
                if (!of_find_in_proplist(type, "instn", len))
                        continue;

                mark_core_ids(hp, mp, idx);

                idx++;
        }
}

static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
        u64 a;

        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
                u64 t = mdesc_arc_target(hp, a);
                const char *name;
                const u64 *id;

                name = mdesc_node_name(hp, t);
                if (strcmp(name, "cpu"))
                        continue;

                id = mdesc_get_property(hp, t, "id", NULL);
                if (*id < NR_CPUS)
                        cpu_data(*id).proc_id = proc_id;
        }
}

static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
        int idx;
        u64 mp;

        idx = 0;
        mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
                const char *type;
                int len;

                type = mdesc_get_property(hp, mp, "type", &len);
                if (!of_find_in_proplist(type, "int", len) &&
                    !of_find_in_proplist(type, "integer", len))
                        continue;

                mark_proc_ids(hp, mp, idx);

                idx++;
        }
}

static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
        __set_proc_ids(hp, "exec_unit");
        __set_proc_ids(hp, "exec-unit");
}

static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
                                         unsigned char def)
{
        u64 val;

        if (!p)
                goto use_default;
        val = *p;

        if (!val || val >= 64)
                goto use_default;

        *mask = ((1U << val) * 64U) - 1U;
        return;

use_default:
        *mask = ((1U << def) * 64U) - 1U;
}

static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
                                     struct trap_per_cpu *tb)
{
        const u64 *val;

        val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);

        val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
        get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);

        val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->resum_qmask, 6);

        val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
        get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}

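/* Walk every "cpu" node in the MD, translate its "id" property into a
 * Linux cpu number, filter against NR_CPUS and the caller-supplied mask,
 * and hand the node to 'func'.  A non-NULL return from 'func' stops the
 * walk and is propagated to the caller.
 */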
static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
        struct mdesc_handle *hp = mdesc_grab();
        void *ret = NULL;
        u64 mp;

        mdesc_for_each_node_by_name(hp, mp, "cpu") {
                const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
                int cpuid = *id;

#ifdef CONFIG_SMP
                if (cpuid >= NR_CPUS) {
                        printk(KERN_WARNING "Ignoring CPU %d which is "
                               ">= NR_CPUS (%d)\n",
                               cpuid, NR_CPUS);
                        continue;
                }
                if (!cpu_isset(cpuid, *mask))
                        continue;
#endif

                ret = func(hp, mp, cpuid, arg);
                if (ret)
                        goto out;
        }
out:
        mdesc_release(hp);
        return ret;
}

static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
#ifdef CONFIG_SMP
        set_cpu_present(cpuid, true);
#endif
        return NULL;
}

void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
{
        if (tlb_type != hypervisor)
                return;

        mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}

static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
        const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
        struct trap_per_cpu *tb;
        cpuinfo_sparc *c;
        u64 a;

#ifndef CONFIG_SMP
        /* On uniprocessor we only want the values for the
         * real physical cpu the kernel booted onto, however
         * cpu_data() only has one entry at index 0.
         */
        if (cpuid != real_hard_smp_processor_id())
                return NULL;
        cpuid = 0;
#endif

        c = &cpu_data(cpuid);
        c->clock_tick = *cfreq;

        tb = &trap_block[cpuid];
        get_mondo_data(hp, mp, tb);

        mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
                u64 j, t = mdesc_arc_target(hp, a);
                const char *t_name;

                t_name = mdesc_node_name(hp, t);
                if (!strcmp(t_name, "cache")) {
                        fill_in_one_cache(c, hp, t);
                        continue;
                }

                mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
                        u64 n = mdesc_arc_target(hp, j);
                        const char *n_name;

                        n_name = mdesc_node_name(hp, n);
                        if (!strcmp(n_name, "cache"))
                                fill_in_one_cache(c, hp, n);
                }
        }

        return NULL;
}

void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
{
        struct mdesc_handle *hp;

        mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

#ifdef CONFIG_SMP
        sparc64_multi_core = 1;
#endif

        hp = mdesc_grab();

        set_core_ids(hp);
        set_proc_ids(hp);

        mdesc_release(hp);

        smp_fill_in_sib_core_maps();
}

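/* The "mdesc" misc device lets userspace read the raw machine description:
 * the whole image is returned by a single read(), and a buffer smaller
 * than the MD fails with -EMSGSIZE.
 */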
static ssize_t mdesc_read(struct file *file, char __user *buf,
                          size_t len, loff_t *offp)
{
        struct mdesc_handle *hp = mdesc_grab();
        int err;

        if (!hp)
                return -ENODEV;

        err = hp->handle_size;
        if (len < hp->handle_size)
                err = -EMSGSIZE;
        else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
                err = -EFAULT;
        mdesc_release(hp);

        return err;
}

static const struct file_operations mdesc_fops = {
        .read   = mdesc_read,
        .owner  = THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "mdesc",
        .fops   = &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
        return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);

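/* Boot-time entry point: ask the hypervisor for the MD size, allocate an
 * LMB-backed handle, fetch the MD into it, and publish it as cur_mdesc
 * before reporting the platform properties.
 */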
void __init sun4v_mdesc_init(void)
{
        struct mdesc_handle *hp;
        unsigned long len, real_len, status;

        (void) sun4v_mach_desc(0UL, 0UL, &len);

        printk("MDESC: Size is %lu bytes.\n", len);

        hp = mdesc_alloc(len, &lmb_mdesc_ops);
        if (hp == NULL) {
                prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
                prom_halt();
        }

        status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
        if (status != HV_EOK || real_len > len) {
                prom_printf("sun4v_mach_desc fails, err(%lu), "
                            "len(%lu), real_len(%lu)\n",
                            status, len, real_len);
                mdesc_free(hp);
                prom_halt();
        }

        cur_mdesc = hp;

        report_platform_properties();
}