/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/kdump.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
#include <mm/mmu_decl.h>

#define DBG(fmt...) printk(KERN_ERR fmt)
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;

static phys_addr_t first_memblock_size;
static int __init early_parse_mem(char *p)
        memory_limit = PAGE_ALIGN(memparse(p, &p));
        DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit);

early_param("mem", early_parse_mem);
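/*
 * Usage sketch (hypothetical command line, for illustration only): booting
 * with "mem=512M" makes memparse() return 512MB, which PAGE_ALIGN() leaves
 * unchanged, so memory_limit then caps usable RAM at 512MB.
 */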
/**
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
#ifdef CONFIG_BLK_DEV_INITRD
        return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
                        start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
        unsigned long start, size;

        DBG("-> move_device_tree\n");

        start = __pa(initial_boot_params);
        size = be32_to_cpu(initial_boot_params->totalsize);

        if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
                        overlaps_crashkernel(start, size) ||
                        overlaps_initrd(start, size)) {
                p = __va(memblock_alloc(size, PAGE_SIZE));
                memcpy(p, initial_boot_params, size);
                initial_boot_params = (struct boot_param_header *)p;
                DBG("Moved device tree to 0x%p\n", p);

        DBG("<- move_device_tree\n");
/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation: Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
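/*
 * Illustrative example (hypothetical property contents, not from any real
 * device tree): a descriptor of
 *   { 0x04, 0x00, 0xf6, 0x3f, 0xc7, 0xc0 }
 * declares 4 attribute bytes of type 0.  The MMU feature is byte 0, bit 0
 * (big-endian), i.e. the most significant bit of 0xf6, extracted exactly as
 * scan_features() does below:
 *   bit = (ftrs[2 + 0] >> (7 - 0)) & 1;   yields (0xf6 >> 7) & 1 == 1
 */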
static struct ibm_pa_feature {
        unsigned long cpu_features;     /* CPU_FTR_xxx bit */
        unsigned long mmu_features;     /* MMU_FTR_xxx bit */
        unsigned int cpu_user_ftrs;     /* PPC_FEATURE_xxx bit */
        unsigned char pabyte;           /* byte number in ibm,pa-features */
        unsigned char pabit;            /* bit number (big-endian) */
        unsigned char invert;           /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
        {0, 0, PPC_FEATURE_HAS_MMU,     0, 0, 0},
        {0, 0, PPC_FEATURE_HAS_FPU,     0, 1, 0},
        {0, MMU_FTR_SLB, 0,             0, 2, 0},
        {CPU_FTR_CTRL, 0, 0,            0, 3, 0},
        {CPU_FTR_NOEXECUTE, 0, 0,       0, 6, 0},
        {CPU_FTR_NODSISRALIGN, 0, 0,    1, 1, 1},
        {0, MMU_FTR_CI_LARGE_PAGE, 0,   1, 2, 0},
        {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 5, 0, 0},
static void __init scan_features(unsigned long node, unsigned char *ftrs,
                                 unsigned long tablelen,
                                 struct ibm_pa_feature *fp,
                                 unsigned long ft_size)
        unsigned long i, len, bit;

        /* find descriptor with type == 0 */
                return; /* descriptor 0 not found */

        /* loop over bits we know about */
        for (i = 0; i < ft_size; ++i, ++fp) {
                if (fp->pabyte >= ftrs[0])
                bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
                if (bit ^ fp->invert) {
                        cur_cpu_spec->cpu_features |= fp->cpu_features;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
                        cur_cpu_spec->mmu_features |= fp->mmu_features;
                } else {
                        cur_cpu_spec->cpu_features &= ~fp->cpu_features;
                        cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
                        cur_cpu_spec->mmu_features &= ~fp->mmu_features;
static void __init check_cpu_pa_features(unsigned long node)
        unsigned char *pa_ftrs;
        unsigned long tablelen;

        pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);

        scan_features(node, pa_ftrs, tablelen,
                      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
        slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
        if (slb_size_ptr != NULL) {
                mmu_slb_size = *slb_size_ptr;
        slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
        if (slb_size_ptr != NULL) {
                mmu_slb_size = *slb_size_ptr;
#define check_cpu_slb_size(node) do { } while(0)
static struct feature_property {
        const char *name;
        u32 min_value;
        unsigned long cpu_feature;
        unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
        {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
        {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
        /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
        {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
        {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
        {"ibm,purr", 1, CPU_FTR_PURR, 0},
        {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
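/*
 * Illustrative example (hypothetical device tree): a cpu node carrying an
 * "ibm,vmx" property of value 2 meets the min_value of 2 in the table above,
 * so check_cpu_feature_properties() would set CPU_FTR_VSX and
 * PPC_FEATURE_HAS_VSX (assuming CONFIG_VSX is enabled).
 */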
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static inline void identical_pvr_fixup(unsigned long node)
        char *model = of_get_flat_dt_prop(node, "model", NULL);

        /*
         * Since 440GR(x)/440EP(x) processors have the same pvr,
         * we check the node path and set bit 28 in the cur_cpu_spec
         * pvr for EP(x) processor version. This bit is always 0 in
         * the "real" pvr. Then we call identify_cpu again with
         * the new logical pvr to enable FPU support.
         */
        if (model && strstr(model, "440EP")) {
                pvr = cur_cpu_spec->pvr_value | 0x8;
                identify_cpu(0, pvr);
                DBG("Using logical pvr %x for %s\n", pvr, model);
#define identical_pvr_fixup(node) do { } while(0)
static void __init check_cpu_feature_properties(unsigned long node)
        struct feature_property *fp = feature_properties;

        for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
                prop = of_get_flat_dt_prop(node, fp->name, NULL);
                if (prop && *prop >= fp->min_value) {
                        cur_cpu_spec->cpu_features |= fp->cpu_feature;
                        cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
static int __init early_init_dt_scan_cpus(unsigned long node,
                                          const char *uname, int depth,
                                          void *data)
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        int found_thread = 0;

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)

        /* Get physical cpuid */
        intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
        nthreads = len / sizeof(int);
        intserv = of_get_flat_dt_prop(node, "reg", NULL);
        /*
         * Now see if any of these threads match our boot cpu.
         * NOTE: This must match the parsing done in smp_setup_cpu_maps.
         */
        for (i = 0; i < nthreads; i++) {
                /*
                 * version 2 of the kexec param format adds the phys cpuid of
                 * the booted cpu.
                 */
                if (initial_boot_params->version >= 2) {
                        if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
                                found = boot_cpu_count;
                        /*
                         * Check if it's the boot-cpu, set its hw index now,
                         * unfortunately this format did not support booting
                         * off secondary threads.
                         */
                        if (of_get_flat_dt_prop(node,
                                        "linux,boot-cpu", NULL) != NULL)
                                found = boot_cpu_count;
        /* logical cpu id is always 0 on UP kernels */

        DBG("boot cpu: logical %d physical %d\n", found,
            intserv[found_thread]);
        set_hard_smp_processor_id(found, intserv[found_thread]);
        /*
         * PAPR defines "logical" PVR values for cpus that
         * meet various levels of the architecture:
         * 0x0f000001   Architecture version 2.04
         * 0x0f000002   Architecture version 2.05
         * If the cpu-version property in the cpu node contains
         * such a value, we call identify_cpu again with the
         * logical PVR value in order to use the cpu feature
         * bits appropriate for the architecture level.
         *
         * A POWER6 partition in "POWER6 architected" mode
         * uses the 0x0f000002 PVR value; in POWER5+ mode
         * it uses 0x0f000001.
         */
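        /*
         * For example, per the note above, a partition running in "POWER6
         * architected" mode reports cpu-version = 0x0f000002; since
         * (0x0f000002 & 0xff000000) == 0x0f000000, the code below re-runs
         * identify_cpu() with that logical PVR.
         */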
        prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
        if (prop && (*prop & 0xff000000) == 0x0f000000)
                identify_cpu(0, *prop);

        identical_pvr_fixup(node);

        check_cpu_feature_properties(node);
        check_cpu_pa_features(node);
        check_cpu_slb_size(node);

#ifdef CONFIG_PPC_PSERIES
                cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
                cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
                                         int depth, void *data)
        unsigned long *lprop;

        /* Use common scan routine to determine if this is the chosen node */
        if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)

        /* check if iommu is forced on or off */
        if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
        if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)

        /* mem=x on the command line is the preferred mechanism */
        lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
                memory_limit = *lprop;

        lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
                tce_alloc_start = *lprop;
        lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
                tce_alloc_end = *lprop;

        lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
                crashk_res.start = *lprop;
        lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
                crashk_res.end = crashk_res.start + *lprop - 1;
#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
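/*
 * Layout sketch, as interpreted by the code below (not a complete PAPR
 * definition): the property starts with one cell holding the number of
 * entries, followed by that many entries of
 *   base address (dt_root_addr_cells cells), DRC index, padding,
 *   associativity-list index, flags (one cell each),
 * i.e. dt_root_addr_cells + 4 cells per entry.
 */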
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
        __be32 *dm, *ls, *usm;
        unsigned long l, n, flags;
        u64 base, size, memblock_size;
        unsigned int is_kexec_kdump = 0, rngs;

        ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
        if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
        memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);

        dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
        if (dm == NULL || l < sizeof(__be32))
        n = *dm++;      /* number of entries */
        if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
        /* check if this is a kexec/kdump kernel. */
        usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",

        for (; n != 0; --n) {
                base = dt_mem_next_cell(dt_root_addr_cells, &dm);
                /* skip DRC index, pad, assoc. list index, flags */
                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((flags & 0x80) || !(flags & 0x8))
                size = memblock_size;
                if (is_kexec_kdump) {
                        /*
                         * For each memblock in ibm,dynamic-memory, a
                         * corresponding entry in the linux,drconf-usable-memory
                         * property contains a counter 'p' followed by 'p'
                         * (base, size) duples.  Now read the counter from
                         * the linux,drconf-usable-memory property.
                         */
                        rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
                        if (!rngs) /* there are no (base, size) duples */
                        if (is_kexec_kdump) {
                                base = dt_mem_next_cell(dt_root_addr_cells,
                                size = dt_mem_next_cell(dt_root_size_cells,
                        if (base >= 0x80000000ul)
                        if ((base + size) > 0x80000000ul)
                                size = 0x80000000ul - base;
                        memblock_add(base, size);
#define early_init_dt_scan_drconf_memory(node) 0
#endif /* CONFIG_PPC_PSERIES */
static int __init early_init_dt_scan_memory_ppc(unsigned long node,
                                                const char *uname,
                                                int depth, void *data)
            strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
                return early_init_dt_scan_drconf_memory(node);

        return early_init_dt_scan_memory(node, uname, depth, data);
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
        if (base >= 0x80000000ul)
        if ((base + size) > 0x80000000ul)
                size = 0x80000000ul - base;

        /* Keep track of the beginning of memory -and- the size of
         * the very first block in the device-tree as it represents
         * the RMA on ppc64 server
         */
        if (base < memstart_addr) {
                memstart_addr = base;
                first_memblock_size = size;

        /* Add the chunk to the MEMBLOCK list */
        memblock_add(base, size);
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
        return __va(memblock_alloc(size, align));
#ifdef CONFIG_BLK_DEV_INITRD
void __init early_init_dt_setup_initrd_arch(unsigned long start,
                                            unsigned long end)
        initrd_start = (unsigned long)__va(start);
        initrd_end = (unsigned long)__va(end);
        initrd_below_start_ok = 1;
static void __init early_reserve_mem(void)
        unsigned long self_base;
        unsigned long self_size;

        reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
                        initial_boot_params->off_mem_rsvmap);

        /* before we do anything, let's reserve the dt blob */
        self_base = __pa((unsigned long)initial_boot_params);
        self_size = initial_boot_params->totalsize;
        memblock_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
        /* then reserve the initrd, if any */
        if (initrd_start && (initrd_end > initrd_start))
                memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
                        _ALIGN_UP(initrd_end, PAGE_SIZE) -
                        _ALIGN_DOWN(initrd_start, PAGE_SIZE));
#endif /* CONFIG_BLK_DEV_INITRD */
        /*
         * Handle the case where we might be booting from an old kexec
         * image that set up the mem_rsvmap as pairs of 32-bit values
         */
        if (*reserve_map > 0xffffffffull) {
                u32 base_32, size_32;
                u32 *reserve_map_32 = (u32 *)reserve_map;

                        base_32 = *(reserve_map_32++);
                        size_32 = *(reserve_map_32++);

                        /* skip if the reservation is for the blob */
                        if (base_32 == self_base && size_32 == self_size)
                        DBG("reserving: %x -> %x\n", base_32, size_32);
                        memblock_reserve(base_32, size_32);

                base = *(reserve_map++);
                size = *(reserve_map++);
                DBG("reserving: %llx -> %llx\n", base, size);
                memblock_reserve(base, size);

#ifdef CONFIG_PHYP_DUMP
/**
 * phyp_dump_calculate_reserve_size() - reserve variable boot area (5% of
 * memory or the boot argument)
 *
 * Function to find the largest size we need to reserve
 * during the early boot process.
 *
 * It either looks for a boot parameter and returns that, OR returns the
 * larger of 256MB or 5% of memory, rounded down to a multiple of 256MB.
 */
static inline unsigned long phyp_dump_calculate_reserve_size(void)
        if (phyp_dump_info->reserve_bootvar)
                return phyp_dump_info->reserve_bootvar;

        /* divide by 20 to get 5% of value */
        tmp = memblock_end_of_DRAM();

        /* round it down in multiples of 256 */
        tmp = tmp & ~0x0FFFFFFFUL;
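        /*
         * Worked example (hypothetical numbers): with 64GB of RAM,
         * memblock_end_of_DRAM() / 20 is 0xCCCCCCCC (about 3.2GB); masking
         * with ~0x0FFFFFFFUL rounds that down to 0xC0000000 (3GB, a multiple
         * of 256MB), which is then compared against PHYP_DUMP_RMR_END below.
         */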
        return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
/**
 * phyp_dump_reserve_mem() - reserve all not-yet-dumped memory
 *
 * This routine may reserve memory regions in the kernel only
 * if the system is supported and a dump was taken in the last
 * boot instance or if the hardware is supported and the
 * scratch area needs to be set up.  In other instances it returns
 * without reserving anything.  When a dump is active, the reserved
 * memory is freed once the dump has been collected (by userland tools).
 */
static void __init phyp_dump_reserve_mem(void)
        unsigned long base, size;
        unsigned long variable_reserve_size;

        if (!phyp_dump_info->phyp_dump_configured) {
                printk(KERN_ERR "Phyp-dump not supported on this hardware\n");

        if (!phyp_dump_info->phyp_dump_at_boot) {
                printk(KERN_INFO "Phyp-dump disabled at boot time\n");

        variable_reserve_size = phyp_dump_calculate_reserve_size();

        if (phyp_dump_info->phyp_dump_is_active) {
                /* Reserve *everything* above RMR.  Area freed by userland tools. */
                base = variable_reserve_size;
                size = memblock_end_of_DRAM() - base;

                /* XXX crashed_ram_end is wrong, since it may be beyond
                 * the memory_limit, it will need to be adjusted. */
                memblock_reserve(base, size);

                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;

                size = phyp_dump_info->cpu_state_size +
                        phyp_dump_info->hpte_region_size +
                        variable_reserve_size;
                base = memblock_end_of_DRAM() - size;
                memblock_reserve(base, size);
                phyp_dump_info->init_reserve_start = base;
                phyp_dump_info->init_reserve_size = size;
static inline void __init phyp_dump_reserve_mem(void) {}
#endif /* CONFIG_PHYP_DUMP */
void __init early_init_devtree(void *params)
        DBG(" -> early_init_devtree(%p)\n", params);

        /* Setup flat device-tree pointer */
        initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
        /* Some machines might need RTAS info for debugging, grab it now. */
        of_scan_flat_dt(early_init_dt_scan_rtas, NULL);

#ifdef CONFIG_PHYP_DUMP
        /* scan tree to see if dump occurred during last boot */
        of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL);
        /* Retrieve various information from the /chosen node of the
         * device-tree, including the platform type, initrd location and
         * size, TCE reserve, and more ...
         */
        of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);

        /* Scan memory nodes and rebuild MEMBLOCKs */
        of_scan_flat_dt(early_init_dt_scan_root, NULL);
        of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
        setup_initial_memory_limit(memstart_addr, first_memblock_size);

        /* Save command line for /proc/cmdline and then parse parameters */
        strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);

        /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
        memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
        /* If relocatable, reserve first 32k for interrupt vectors etc. */
        if (PHYSICAL_START > MEMORY_START)
                memblock_reserve(MEMORY_START, 0x8000);
        reserve_kdump_trampoline();
        reserve_crashkernel();
        phyp_dump_reserve_mem();

        limit = memory_limit;

        /* Ensure that total memory size is page-aligned, because
         * otherwise mark_bootmem() gets upset. */
        memsize = memblock_phys_mem_size();
        if ((memsize & PAGE_MASK) != memsize)
                limit = memsize & PAGE_MASK;
        memblock_enforce_memory_limit(limit);

        DBG("Phys. mem: %llx\n", memblock_phys_mem_size());

        /* We may need to relocate the flat tree, do it now.
         * FIXME .. and the initrd too? */

        DBG("Scanning CPUs ...\n");
        /* Retrieve CPU related information from the flat tree
         * (altivec support, boot CPU ID, ...)
         */
        of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

        DBG(" <- early_init_devtree()\n");
/*
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 */
/**
 * of_find_next_cache_node - Find a node's subsidiary cache
 * @np: node of type "cpu" or "cache"
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.  Caller should hold a reference
 * to np.
 */
struct device_node *of_find_next_cache_node(struct device_node *np)
        struct device_node *child;
        const phandle *handle;

        handle = of_get_property(np, "l2-cache", NULL);
                handle = of_get_property(np, "next-level-cache", NULL);

                return of_find_node_by_phandle(*handle);
        /* OF on pmac has nodes instead of properties named "l2-cache"
         * beneath CPU nodes.
         */
        if (!strcmp(np->type, "cpu"))
                for_each_child_of_node(np, child)
                        if (!strcmp(child->type, "cache"))

#ifdef CONFIG_PPC_PSERIES
/*
 * Fix up the uninitialized fields in a new device node:
 * name, type and pci-specific fields
 */
static int of_finish_dynamic_node(struct device_node *node)
        struct device_node *parent = of_get_parent(node);
        const phandle *ibm_phandle;

        node->name = of_get_property(node, "name", NULL);
        node->type = of_get_property(node, "device_type", NULL);

                node->name = "<NULL>";
                node->type = "<NULL>";
        /* We don't support that function on PowerMac, at least
         * not yet */
        if (machine_is(powermac))
        /* fix up new node's phandle field */
        if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
                node->phandle = *ibm_phandle;
static int prom_reconfig_notifier(struct notifier_block *nb,
                                  unsigned long action, void *node)
        case PSERIES_RECONFIG_ADD:
                err = of_finish_dynamic_node(node);
                        printk(KERN_ERR "finish_node returned %d\n", err);

static struct notifier_block prom_reconfig_nb = {
        .notifier_call = prom_reconfig_notifier,
        .priority = 10, /* This one needs to run first */

static int __init prom_reconfig_setup(void)
        return pSeries_reconfig_notifier_register(&prom_reconfig_nb);
__initcall(prom_reconfig_setup);
/* Find the device node for a given logical cpu number, also returns the cpu
 * local thread number (index in ibm,interrupt-server#s) if relevant and
 * asked for (non NULL)
 */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        struct device_node *np;

        hardid = get_hard_smp_processor_id(cpu);

        for_each_node_by_type(np, "cpu") {
                unsigned int plen, t;
                /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
                 * fall back to "reg" property and assume no threads
                 */
                intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
                if (intserv == NULL) {
                        const u32 *reg = of_get_property(np, "reg", NULL);
                        if (*reg == hardid) {
                for (t = 0; t < plen; t++) {
                        if (hardid == intserv[t]) {

EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;

static int __init export_flat_device_tree(void)
        flat_dt_blob.data = initial_boot_params;
        flat_dt_blob.size = initial_boot_params->totalsize;

        d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
                                powerpc_debugfs_root, &flat_dt_blob);

__initcall(export_flat_device_tree);