#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mmzone.h>	/* for numnodes */
#include <asm/current.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/launch.h>
#include <asm/sn/mapped_kernel.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/types.h>
#include <asm/sn/sn0/addrs.h>
#include <asm/sn/sn0/hubni.h>
#include <asm/sn/sn0/hubio.h>
#include <asm/sn/sn0/ip27.h>
#define CPU_NONE		(cpuid_t)-1

/*
 * Simple bit helpers over the cpumask_t / cnodemask_t integer words.
 * All shifts use 1ULL so bits >= 32 work on 64-bit masks; the original
 * CPUMASK_SETB used a plain int `1 << (bit)`, which is undefined for
 * bit >= 31 — fixed to match its siblings.
 */
#define CPUMASK_CLRALL(p)	(p) = 0
#define CPUMASK_SETB(p, bit)	(p) |= 1ULL << (bit)
#define CPUMASK_CLRB(p, bit)	(p) &= ~(1ULL << (bit))
#define CPUMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))

#define CNODEMASK_CLRALL(p)	(p) = 0
#define CNODEMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
#define CNODEMASK_SETB(p, bit)	((p) |= 1ULL << (bit))
38 cpumask_t boot_cpumask
;
39 hubreg_t region_mask
= 0;
40 static int fine_mode
= 0;
42 static spinlock_t hub_mask_lock
= SPIN_LOCK_UNLOCKED
;
43 static cnodemask_t hub_init_mask
;
44 static atomic_t numstarted
= ATOMIC_INIT(1);
45 nasid_t master_nasid
= INVALID_NASID
;
47 cnodeid_t nasid_to_compact_node
[MAX_NASIDS
];
48 nasid_t compact_to_nasid_node
[MAX_COMPACT_NODES
];
49 cnodeid_t cpuid_to_compact_node
[MAXCPUS
];
51 hubreg_t
get_region(cnodeid_t cnode
)
54 return COMPACT_TO_NASID_NODEID(cnode
) >> NASID_TO_FINEREG_SHFT
;
56 return COMPACT_TO_NASID_NODEID(cnode
) >> NASID_TO_COARSEREG_SHFT
;
59 static void gen_region_mask(hubreg_t
*region_mask
, int maxnodes
)
64 for (cnode
= 0; cnode
< maxnodes
; cnode
++) {
65 (*region_mask
) |= 1ULL << get_region(cnode
);
69 int is_fine_dirmode(void)
71 return (((LOCAL_HUB_L(NI_STATUS_REV_ID
) & NSRI_REGIONSIZE_MASK
)
72 >> NSRI_REGIONSIZE_SHFT
) & REGIONSIZE_FINE
);
75 nasid_t
get_actual_nasid(lboard_t
*brd
)
82 /* find out if we are a completely disabled brd. */
83 hub
= (klhub_t
*)find_first_component(brd
, KLSTRUCT_HUB
);
86 if (!(hub
->hub_info
.flags
& KLINFO_ENABLE
)) /* disabled node brd */
87 return hub
->hub_info
.physid
;
89 return brd
->brd_nasid
;
92 int do_cpumask(cnodeid_t cnode
, nasid_t nasid
, cpumask_t
*boot_cpumask
,
100 brd
= find_lboard((lboard_t
*)KL_CONFIG_INFO(nasid
), KLTYPE_IP27
);
103 acpu
= (klcpu_t
*)find_first_component(brd
, KLSTRUCT_CPU
);
105 cpuid
= acpu
->cpu_info
.virtid
;
106 /* cnode is not valid for completely disabled brds */
107 if (get_actual_nasid(brd
) == brd
->brd_nasid
)
108 cpuid_to_compact_node
[cpuid
] = cnode
;
109 if (cpuid
> *highest
)
111 /* Only let it join in if it's marked enabled */
112 if (acpu
->cpu_info
.flags
& KLINFO_ENABLE
) {
113 CPUMASK_SETB(*boot_cpumask
, cpuid
);
116 acpu
= (klcpu_t
*)find_component(brd
, (klinfo_t
*)acpu
,
119 brd
= KLCF_NEXT(brd
);
121 brd
= find_lboard(brd
,KLTYPE_IP27
);
129 cpuid_t
cpu_node_probe(cpumask_t
*boot_cpumask
, int *numnodes
)
131 int i
, cpus
= 0, highest
= 0;
136 * Initialize the arrays to invalid nodeid (-1)
138 for (i
= 0; i
< MAX_COMPACT_NODES
; i
++)
139 compact_to_nasid_node
[i
] = INVALID_NASID
;
140 for (i
= 0; i
< MAX_NASIDS
; i
++)
141 nasid_to_compact_node
[i
] = INVALID_CNODEID
;
142 for (i
= 0; i
< MAXCPUS
; i
++)
143 cpuid_to_compact_node
[i
] = INVALID_CNODEID
;
146 for (i
= 0; i
< MAX_COMPACT_NODES
; i
++) {
147 if ((nasid
= gdap
->g_nasidtable
[i
]) == INVALID_NASID
) {
150 compact_to_nasid_node
[i
] = nasid
;
151 nasid_to_compact_node
[nasid
] = i
;
153 cpus
+= do_cpumask(i
, nasid
, boot_cpumask
, &highest
);
158 * Cpus are numbered in order of cnodes. Currently, disabled
159 * cpus are not numbered.
165 int cpu_enabled(cpuid_t cpu
)
169 return (CPUMASK_TSTB(boot_cpumask
, cpu
) != 0);
176 master_nasid
= get_nasid();
177 fine_mode
= is_fine_dirmode();
180 * Probe for all CPUs - this creates the cpumask and
181 * sets up the mapping tables.
183 CPUMASK_CLRALL(boot_cpumask
);
184 maxcpus
= cpu_node_probe(&boot_cpumask
, &numnodes
);
185 printk("Discovered %d cpus on %d nodes\n", maxcpus
, numnodes
);
187 gen_region_mask(®ion_mask
, numnodes
);
188 CNODEMASK_CLRALL(hub_init_mask
);
190 setup_replication_mask(numnodes
);
193 * Set all nodes' calias sizes to 8k
195 for (i
= 0; i
< numnodes
; i
++) {
198 nasid
= COMPACT_TO_NASID_NODEID(i
);
201 * Always have node 0 in the region mask, otherwise
202 * CALIAS accesses get exceptions since the hub
203 * thinks it is a node 0 address.
205 REMOTE_HUB_S(nasid
, PI_REGION_PRESENT
, (region_mask
| 1));
206 #ifdef CONFIG_REPLICATE_EXHANDLERS
207 REMOTE_HUB_S(nasid
, PI_CALIAS_SIZE
, PI_CALIAS_SIZE_8K
);
209 REMOTE_HUB_S(nasid
, PI_CALIAS_SIZE
, PI_CALIAS_SIZE_0
);
214 * Set up all hubs to have a big window pointing at
215 * widget 0. Memory mode, widget 0, offset 0
217 REMOTE_HUB_S(nasid
, IIO_ITTE(SWIN0_BIGWIN
),
218 ((HUB_PIO_MAP_TO_MEM
<< IIO_ITTE_IOSP_SHIFT
) |
219 (0 << IIO_ITTE_WIDGET_SHIFT
)));
225 void intr_clear_bits(nasid_t nasid
, volatile hubreg_t
*pend
, int base_level
,
228 volatile hubreg_t bits
;
231 /* Check pending interrupts */
232 if ((bits
= HUB_L(pend
)) != 0)
233 for (i
= 0; i
< N_INTPEND_BITS
; i
++)
235 LOCAL_HUB_CLR_INTR(base_level
+ i
);
238 void intr_clear_all(nasid_t nasid
)
240 REMOTE_HUB_S(nasid
, PI_INT_MASK0_A
, 0);
241 REMOTE_HUB_S(nasid
, PI_INT_MASK0_B
, 0);
242 REMOTE_HUB_S(nasid
, PI_INT_MASK1_A
, 0);
243 REMOTE_HUB_S(nasid
, PI_INT_MASK1_B
, 0);
244 intr_clear_bits(nasid
, REMOTE_HUB_ADDR(nasid
, PI_INT_PEND0
),
245 INT_PEND0_BASELVL
, "INT_PEND0");
246 intr_clear_bits(nasid
, REMOTE_HUB_ADDR(nasid
, PI_INT_PEND1
),
247 INT_PEND1_BASELVL
, "INT_PEND1");
250 void sn_mp_setup(void)
257 for (cnode
= 0; cnode
< numnodes
; cnode
++) {
259 init_platform_nodepda();
261 intr_clear_all(COMPACT_TO_NASID_NODEID(cnode
));
264 for (cpu
= 0; cpu
< maxcpus
; cpu
++) {
270 void per_hub_init(cnodeid_t cnode
)
272 extern void pcibr_setup(cnodeid_t
);
276 nasid
= COMPACT_TO_NASID_NODEID(cnode
);
278 spin_lock(&hub_mask_lock
);
280 if (!(done
= CNODEMASK_TSTB(hub_init_mask
, cnode
))) {
281 /* Turn our bit on in the mask. */
282 CNODEMASK_SETB(hub_init_mask
, cnode
);
284 * Do the actual initialization if it hasn't been done yet.
285 * We don't need to hold a lock for this work.
288 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
290 REMOTE_HUB_S(nasid
, IIO_ICTP
, 0x800);
291 REMOTE_HUB_S(nasid
, IIO_ICTO
, 0xff);
294 #ifdef CONFIG_REPLICATE_EXHANDLERS
296 * If this is not a headless node initialization,
297 * copy over the caliased exception handlers.
299 if (get_compact_nodeid() == cnode
) {
300 extern char except_vec0
, except_vec1_r10k
;
301 extern char except_vec2_generic
, except_vec3_generic
;
303 memcpy((void *)(KSEG0
+ 0x100), &except_vec2_generic
,
305 memcpy((void *)(KSEG0
+ 0x180), &except_vec3_generic
,
307 memcpy((void *)KSEG0
, &except_vec0
, 0x80);
308 memcpy((void *)KSEG0
+ 0x080, &except_vec1_r10k
, 0x80);
309 memcpy((void *)(KSEG0
+ 0x100), (void *) KSEG0
, 0x80);
310 memcpy((void *)(KSEG0
+ 0x180), &except_vec3_generic
,
317 spin_unlock(&hub_mask_lock
);
321 * This is similar to hard_smp_processor_id().
323 cpuid_t
getcpuid(void)
327 klcpu
= nasid_slice_to_cpuinfo(get_nasid(),LOCAL_HUB_L(PI_CPU_NUM
));
328 return klcpu
->cpu_info
.virtid
;
331 void per_cpu_init(void)
333 extern void install_cpu_nmi_handler(int slice
);
334 extern void load_mmu(void);
335 static int is_slave
= 0;
336 int cpu
= smp_processor_id();
337 cnodeid_t cnode
= get_compact_nodeid();
339 current_cpu_data
.asid_cache
= ASID_FIRST_VERSION
;
340 TLBMISS_HANDLER_SETUP();
344 set_cp0_status(ST0_IM
, 0);
347 if (smp_processor_id()) /* master can't do this early, no kmalloc */
348 install_cpuintr(cpu
);
349 /* Install our NMI handler if symmon hasn't installed one. */
350 install_cpu_nmi_handler(cputoslice(cpu
));
352 install_tlbintr(cpu
);
354 set_cp0_status(SRB_DEV0
| SRB_DEV1
, SRB_DEV0
| SRB_DEV1
);
356 set_cp0_status(ST0_BEV
, 0);
358 set_cp0_status(ST0_XX
, ST0_XX
);
359 set_cp0_status(ST0_KX
|ST0_SX
|ST0_UX
, ST0_KX
|ST0_SX
|ST0_UX
);
362 atomic_inc(&numstarted
);
368 cnodeid_t
get_compact_nodeid(void)
374 * Map the physical node id to a virtual node id (virtual node ids
377 return NASID_TO_COMPACT_NODEID(nasid
);
383 * Takes as first input the PROM assigned cpu id, and the kernel
384 * assigned cpu id as the second.
386 static void alloc_cpupda(cpuid_t cpu
, int cpunum
)
391 node
= get_cpu_cnode(cpu
);
392 nasid
= COMPACT_TO_NASID_NODEID(node
);
394 cputonasid(cpunum
) = nasid
;
395 cputocnode(cpunum
) = node
;
396 cputoslice(cpunum
) = get_cpu_slice(cpu
);
397 cpu_data
[cpunum
].p_cpuid
= cpu
;
400 void __init
smp_callin(void)
404 smp_store_cpu_info(cpuid
);
408 int __init
start_secondary(void)
410 extern int cpu_idle(void);
411 extern atomic_t smp_commenced
;
414 while (!atomic_read(&smp_commenced
));
418 static volatile cpumask_t boot_barrier
;
422 CPUMASK_CLRB(boot_barrier
, getcpuid()); /* needs atomicity */
438 cpuid_t cpu
, mycpuid
= getcpuid();
440 extern void bootstrap(void);
443 /* Master has already done per_cpu_init() */
444 install_cpuintr(smp_processor_id());
450 replicate_kernel_text(numnodes
);
451 boot_barrier
= boot_cpumask
;
453 for (cpu
= 0; cpu
< maxcpus
; cpu
++) {
454 if (cpu
== mycpuid
) {
455 alloc_cpupda(cpu
, num_cpus
);
457 /* We're already started, clear our bit */
458 CPUMASK_CLRB(boot_barrier
, cpu
);
462 /* Skip holes in CPU space */
463 if (CPUMASK_TSTB(boot_cpumask
, cpu
)) {
464 struct task_struct
*p
;
467 * The following code is purely to make sure
468 * Linux can schedule processes on this slave.
470 kernel_thread(0, NULL
, CLONE_PID
);
471 p
= init_task
.prev_task
;
472 sprintf(p
->comm
, "%s%d", "Idle", num_cpus
);
473 init_tasks
[num_cpus
] = p
;
474 alloc_cpupda(cpu
, num_cpus
);
475 p
->processor
= num_cpus
;
476 p
->has_cpu
= 1; /* we schedule the first task manually */
477 del_from_runqueue(p
);
479 /* Attach to the address space of init_task. */
480 atomic_inc(&init_mm
.mm_count
);
481 p
->active_mm
= &init_mm
;
484 * Launch a slave into bootstrap().
485 * It doesn't take an argument, and we
486 * set sp to the kernel stack of the newly
487 * created idle process, gp to the proc struct
488 * (so that current-> works).
490 LAUNCH_SLAVE(cputonasid(num_cpus
),cputoslice(num_cpus
),
491 (launch_proc_t
)MAPPED_KERN_RW_TO_K0(bootstrap
),
492 0, (void *)((unsigned long)p
+
493 KERNEL_STACK_SIZE
- 32), (void *)p
);
496 * Now optimistically set the mapping arrays. We
497 * need to wait here, verify the cpu booted up, then
498 * fire up the next cpu.
500 __cpu_number_map
[cpu
] = num_cpus
;
501 __cpu_logical_map
[num_cpus
] = cpu
;
504 * Wait this cpu to start up and initialize its hub,
505 * and discover the io devices it will control.
507 * XXX: We really want to fire up launch all the CPUs
508 * at once. We have to preserve the order of the
509 * devices on the bridges first though.
511 while(atomic_read(&numstarted
) != num_cpus
);
517 Wait logic goes here
.
519 for (cnode
= 0; cnode
< numnodes
; cnode
++) {
521 if (cnodetocpu(cnode
) == -1) {
522 printk("Initializing headless hub,cnode %d", cnode
);
531 smp_num_cpus
= num_cpus
;
534 #else /* CONFIG_SMP */
536 #endif /* CONFIG_SMP */