/* arch/mips64/sgi-ip27/ip27-init.c */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mmzone.h>	/* for numnodes */
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sn/types.h>
#include <asm/sn/sn0/addrs.h>
#include <asm/sn/sn0/hubni.h>
#include <asm/sn/sn0/hubio.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/ioc3.h>
#include <asm/mipsregs.h>
#include <asm/sn/gda.h>
#include <asm/sn/intr.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/sn/launch.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/sn0/ip27.h>
#include <asm/sn/mapped_kernel.h>

#define CPU_NONE		(cpuid_t)-1

#define CPUMASK_CLRALL(p)	(p) = 0
#define CPUMASK_SETB(p, bit)	(p) |= 1 << (bit)
#define CPUMASK_CLRB(p, bit)	(p) &= ~(1ULL << (bit))
#define CPUMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))

#define CNODEMASK_CLRALL(p)	(p) = 0
#define CNODEMASK_TSTB(p, bit)	((p) & (1ULL << (bit)))
#define CNODEMASK_SETB(p, bit)	((p) |= 1ULL << (bit))

cpumask_t boot_cpumask;
hubreg_t region_mask = 0;
static int fine_mode = 0;
int maxcpus;
static spinlock_t hub_mask_lock = SPIN_LOCK_UNLOCKED;
static cnodemask_t hub_init_mask;
static atomic_t numstarted = ATOMIC_INIT(1);
nasid_t master_nasid = INVALID_NASID;

cnodeid_t nasid_to_compact_node[MAX_NASIDS];
nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
cnodeid_t cpuid_to_compact_node[MAXCPUS];

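/*
 * Convert a compact node id to the hub "region" its nasid falls in,
 * using the fine or coarse region shift detected at boot.
 */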
hubreg_t get_region(cnodeid_t cnode)
{
	if (fine_mode)
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT;
	else
		return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT;
}

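/*
 * Build a bitmask with one bit set for every region that contains at
 * least one of the first 'maxnodes' compact nodes.
 */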
static void gen_region_mask(hubreg_t *region_mask, int maxnodes)
{
	cnodeid_t cnode;

	(*region_mask) = 0;
	for (cnode = 0; cnode < maxnodes; cnode++) {
		(*region_mask) |= 1ULL << get_region(cnode);
	}
}

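/*
 * Read the region-size field of the local hub's NI status register to
 * find out whether the system is running in fine region mode.
 */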
int is_fine_dirmode(void)
{
	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}

nasid_t get_actual_nasid(lboard_t *brd)
{
	klhub_t *hub;

	if (!brd)
		return INVALID_NASID;

	/* find out if we are a completely disabled brd. */
	hub = (klhub_t *)find_first_component(brd, KLSTRUCT_HUB);
	if (!hub)
		return INVALID_NASID;
	if (!(hub->hub_info.flags & KLINFO_ENABLE))	/* disabled node brd */
		return hub->hub_info.physid;
	else
		return brd->brd_nasid;
}

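/*
 * Walk the klconfig board list of one node: record the compact node id
 * for each CPU found, track the highest CPU id seen and set the enabled
 * CPUs in the boot cpumask.  Returns the number of enabled CPUs found.
 */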
int do_cpumask(cnodeid_t cnode, nasid_t nasid, cpumask_t *boot_cpumask,
	       int *highest)
{
	lboard_t *brd;
	klcpu_t *acpu;
	int cpus_found = 0;
	cpuid_t cpuid;

	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);

	do {
		acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU);
		while (acpu) {
			cpuid = acpu->cpu_info.virtid;
			/* cnode is not valid for completely disabled brds */
			if (get_actual_nasid(brd) == brd->brd_nasid)
				cpuid_to_compact_node[cpuid] = cnode;
			if (cpuid > *highest)
				*highest = cpuid;
			/* Only let it join in if it's marked enabled */
			if (acpu->cpu_info.flags & KLINFO_ENABLE) {
				CPUMASK_SETB(*boot_cpumask, cpuid);
				cpus_found++;
			}
			acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu,
							 KLSTRUCT_CPU);
		}
		brd = KLCF_NEXT(brd);
		if (brd)
			brd = find_lboard(brd, KLTYPE_IP27);
		else
			break;
	} while (brd);

	return cpus_found;
}

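/*
 * Scan the GDA nasid table: build the nasid<->compact-node translation
 * arrays, count the nodes, fill in the boot cpumask via do_cpumask() and
 * return the number of CPU slots (highest CPU id + 1).
 */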
cpuid_t cpu_node_probe(cpumask_t *boot_cpumask, int *numnodes)
{
	int i, cpus = 0, highest = 0;
	gda_t *gdap = GDA;
	nasid_t nasid;

	/*
	 * Initialize the arrays to invalid nodeid (-1)
	 */
	for (i = 0; i < MAX_COMPACT_NODES; i++)
		compact_to_nasid_node[i] = INVALID_NASID;
	for (i = 0; i < MAX_NASIDS; i++)
		nasid_to_compact_node[i] = INVALID_CNODEID;
	for (i = 0; i < MAXCPUS; i++)
		cpuid_to_compact_node[i] = INVALID_CNODEID;

	*numnodes = 0;
	for (i = 0; i < MAX_COMPACT_NODES; i++) {
		if ((nasid = gdap->g_nasidtable[i]) == INVALID_NASID) {
			break;
		} else {
			compact_to_nasid_node[i] = nasid;
			nasid_to_compact_node[nasid] = i;
			(*numnodes)++;
			cpus += do_cpumask(i, nasid, boot_cpumask, &highest);
		}
	}

	/*
	 * Cpus are numbered in order of cnodes. Currently, disabled
	 * cpus are not numbered.
	 */
	return (highest + 1);
}

int cpu_enabled(cpuid_t cpu)
{
	if (cpu == CPU_NONE)
		return 0;
	return (CPUMASK_TSTB(boot_cpumask, cpu) != 0);
}

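/*
 * Early initialization on the master CPU: probe all nodes and CPUs,
 * compute the region mask and program each hub's region-present and
 * calias-size registers.
 */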
void mlreset(void)
{
	int i;

	master_nasid = get_nasid();
	fine_mode = is_fine_dirmode();

	/*
	 * Probe for all CPUs - this creates the cpumask and
	 * sets up the mapping tables.
	 */
	CPUMASK_CLRALL(boot_cpumask);
	maxcpus = cpu_node_probe(&boot_cpumask, &numnodes);
	printk("Discovered %d cpus on %d nodes\n", maxcpus, numnodes);

	gen_region_mask(&region_mask, numnodes);
	CNODEMASK_CLRALL(hub_init_mask);

	setup_replication_mask(numnodes);

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for (i = 0; i < numnodes; i++) {
		nasid_t nasid;

		nasid = COMPACT_TO_NASID_NODEID(i);

		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
#ifdef CONFIG_REPLICATE_EXHANDLERS
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_8K);
#else
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);
#endif

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			(0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}

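/*
 * Clear every interrupt bit currently set in the given pending register,
 * acknowledging each one at base_level + bit on the local hub.
 */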
void intr_clear_bits(nasid_t nasid, volatile hubreg_t *pend, int base_level,
		     char *name)
{
	volatile hubreg_t bits;
	int i;

	/* Check pending interrupts */
	if ((bits = HUB_L(pend)) != 0)
		for (i = 0; i < N_INTPEND_BITS; i++)
			if (bits & (1 << i))
				LOCAL_HUB_CLR_INTR(base_level + i);
}

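/*
 * Disable all four PI interrupt mask registers of a node's hub and drain
 * anything still pending in PI_INT_PEND0/1.
 */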
void intr_clear_all(nasid_t nasid)
{
	REMOTE_HUB_S(nasid, PI_INT_MASK0_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK0_B, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_A, 0);
	REMOTE_HUB_S(nasid, PI_INT_MASK1_B, 0);
	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND0),
			INT_PEND0_BASELVL, "INT_PEND0");
	intr_clear_bits(nasid, REMOTE_HUB_ADDR(nasid, PI_INT_PEND1),
			INT_PEND1_BASELVL, "INT_PEND1");
}

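/*
 * Per-node MP setup run before the slaves are launched: currently just
 * clears the interrupt state on every hub.
 */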
void sn_mp_setup(void)
{
	cnodeid_t cnode;
#if 0
	cpuid_t cpu;
#endif

	for (cnode = 0; cnode < numnodes; cnode++) {
#if 0
		init_platform_nodepda();
#endif
		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
	}
#if 0
	for (cpu = 0; cpu < maxcpus; cpu++) {
		init_platform_pda();
	}
#endif
}

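/*
 * One-time per-hub initialization, guarded by hub_init_mask so that only
 * the first CPU of a node does the work: set the CRB timeouts, initialize
 * the RTC and bridges and (optionally) replicate the exception handlers.
 */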
void per_hub_init(cnodeid_t cnode)
{
	extern void pcibr_setup(cnodeid_t);
	cnodemask_t done;
	nasid_t nasid;

	nasid = COMPACT_TO_NASID_NODEID(cnode);

	spin_lock(&hub_mask_lock);
	/* Test our bit. */
	if (!(done = CNODEMASK_TSTB(hub_init_mask, cnode))) {
		/* Turn our bit on in the mask. */
		CNODEMASK_SETB(hub_init_mask, cnode);

		/*
		 * Do the actual initialization if it hasn't been done yet.
		 * We don't need to hold a lock for this work.
		 */

		/*
		 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
		 */
		REMOTE_HUB_S(nasid, IIO_ICTP, 0x800);
		REMOTE_HUB_S(nasid, IIO_ICTO, 0xff);
		hub_rtc_init(cnode);
		pcibr_setup(cnode);
#ifdef CONFIG_REPLICATE_EXHANDLERS
		/*
		 * If this is not a headless node initialization,
		 * copy over the caliased exception handlers.
		 */
		if (get_compact_nodeid() == cnode) {
			extern char except_vec0, except_vec1_r10k;
			extern char except_vec2_generic, except_vec3_generic;

			memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic,
			       0x80);
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
			       0x80);
			memcpy((void *)KSEG0, &except_vec0, 0x80);
			memcpy((void *)KSEG0 + 0x080, &except_vec1_r10k, 0x80);
			memcpy((void *)(KSEG0 + 0x100), (void *)KSEG0, 0x80);
			memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
			       0x100);
			flush_cache_l1();
			flush_cache_l2();
		}
#endif
	}
	spin_unlock(&hub_mask_lock);
}

/*
 * This is similar to hard_smp_processor_id().
 */
cpuid_t getcpuid(void)
{
	klcpu_t *klcpu;

	klcpu = nasid_slice_to_cpuinfo(get_nasid(), LOCAL_HUB_L(PI_CPU_NUM));
	return klcpu->cpu_info.virtid;
}

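/*
 * Per-CPU setup run by every processor: MMU context and TLB handler
 * setup, hub, timer, interrupt and NMI handler initialization, plus
 * extra CP0 status setup and an MMU reload on slave CPUs (the first
 * caller, the master, only flips the is_slave flag).
 */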
void per_cpu_init(void)
{
	extern void install_cpu_nmi_handler(int slice);
	extern void load_mmu(void);
	static int is_slave = 0;
	int cpu = smp_processor_id();
	cnodeid_t cnode = get_compact_nodeid();

	current_cpu_data.asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();
#if 0
	intr_init();
#endif
	set_cp0_status(ST0_IM, 0);
	per_hub_init(cnode);
	cpu_time_init();
	if (smp_processor_id())	/* master can't do this early, no kmalloc */
		install_cpuintr(cpu);
	/* Install our NMI handler if symmon hasn't installed one. */
	install_cpu_nmi_handler(cputoslice(cpu));
#if 0
	install_tlbintr(cpu);
#endif
	set_cp0_status(SRB_DEV0 | SRB_DEV1, SRB_DEV0 | SRB_DEV1);
	if (is_slave) {
		set_cp0_status(ST0_BEV, 0);
		if (mips4_available)
			set_cp0_status(ST0_XX, ST0_XX);
		set_cp0_status(ST0_KX|ST0_SX|ST0_UX, ST0_KX|ST0_SX|ST0_UX);
		sti();
		load_mmu();
		atomic_inc(&numstarted);
	} else {
		is_slave = 1;
	}
}

cnodeid_t get_compact_nodeid(void)
{
	nasid_t nasid;

	nasid = get_nasid();
	/*
	 * Map the physical node id to a virtual node id (virtual node ids
	 * are contiguous).
	 */
	return NASID_TO_COMPACT_NODEID(nasid);
}

#ifdef CONFIG_SMP

/*
 * Takes as first input the PROM assigned cpu id, and the kernel
 * assigned cpu id as the second.
 */
static void alloc_cpupda(cpuid_t cpu, int cpunum)
{
	cnodeid_t node;
	nasid_t nasid;

	node = get_cpu_cnode(cpu);
	nasid = COMPACT_TO_NASID_NODEID(node);

	cputonasid(cpunum) = nasid;
	cputocnode(cpunum) = node;
	cputoslice(cpunum) = get_cpu_slice(cpu);
	cpu_data[cpunum].p_cpuid = cpu;
}

void __init smp_callin(void)
{
#if 0
	calibrate_delay();
	smp_store_cpu_info(cpuid);
#endif
}

int __init start_secondary(void)
{
	extern int cpu_idle(void);
	extern atomic_t smp_commenced;

	smp_callin();
	while (!atomic_read(&smp_commenced));
	return cpu_idle();
}

static volatile cpumask_t boot_barrier;

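/*
 * C entry point for a launched slave CPU: clear our bit in the boot
 * barrier, do per-CPU init, flush the TLB and caches, then fall into the
 * generic secondary-CPU startup path.
 */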
void cboot(void)
{
	CPUMASK_CLRB(boot_barrier, getcpuid());	/* needs atomicity */
	per_cpu_init();
#if 0
	ecc_init();
	bte_lateinit();
	init_mfhi_war();
#endif
	_flush_tlb_all();
	flush_cache_l1();
	flush_cache_l2();
	start_secondary();
}

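/*
 * Called on the master to bring the slave CPUs up: set up per-node state,
 * replicate the kernel text, create an idle task for every enabled CPU
 * and launch it into bootstrap(), waiting for each one to report in via
 * numstarted before starting the next.
 */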
void allowboot(void)
{
	int num_cpus = 0;
	cpuid_t cpu, mycpuid = getcpuid();
	cnodeid_t cnode;
	extern void bootstrap(void);

	sn_mp_setup();
	/* Master has already done per_cpu_init() */
	install_cpuintr(smp_processor_id());
#if 0
	bte_lateinit();
	ecc_init();
#endif

	replicate_kernel_text(numnodes);
	boot_barrier = boot_cpumask;
	/* Launch slaves. */
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (cpu == mycpuid) {
			alloc_cpupda(cpu, num_cpus);
			num_cpus++;
			/* We're already started, clear our bit */
			CPUMASK_CLRB(boot_barrier, cpu);
			continue;
		}

		/* Skip holes in CPU space */
		if (CPUMASK_TSTB(boot_cpumask, cpu)) {
			struct task_struct *p;

			/*
			 * The following code is purely to make sure
			 * Linux can schedule processes on this slave.
			 */
			kernel_thread(0, NULL, CLONE_PID);
			p = init_task.prev_task;
			sprintf(p->comm, "%s%d", "Idle", num_cpus);
			init_tasks[num_cpus] = p;
			alloc_cpupda(cpu, num_cpus);
			p->processor = num_cpus;
			p->has_cpu = 1;	/* we schedule the first task manually */
			del_from_runqueue(p);
			unhash_process(p);
			/* Attach to the address space of init_task. */
			atomic_inc(&init_mm.mm_count);
			p->active_mm = &init_mm;

			/*
			 * Launch a slave into bootstrap().
			 * It doesn't take an argument, and we
			 * set sp to the kernel stack of the newly
			 * created idle process, gp to the proc struct
			 * (so that current-> works).
			 */
			LAUNCH_SLAVE(cputonasid(num_cpus), cputoslice(num_cpus),
				(launch_proc_t)MAPPED_KERN_RW_TO_K0(bootstrap),
				0, (void *)((unsigned long)p +
				KERNEL_STACK_SIZE - 32), (void *)p);

			/*
			 * Now optimistically set the mapping arrays. We
			 * need to wait here, verify the cpu booted up, then
			 * fire up the next cpu.
			 */
			__cpu_number_map[cpu] = num_cpus;
			__cpu_logical_map[num_cpus] = cpu;
			num_cpus++;
			/*
			 * Wait this cpu to start up and initialize its hub,
			 * and discover the io devices it will control.
			 *
			 * XXX: We really want to fire up launch all the CPUs
			 * at once.  We have to preserve the order of the
			 * devices on the bridges first though.
			 */
			while (atomic_read(&numstarted) != num_cpus);
		}
	}

#ifdef LATER
	Wait logic goes here.
#endif
	for (cnode = 0; cnode < numnodes; cnode++) {
#if 0
		if (cnodetocpu(cnode) == -1) {
			printk("Initializing headless hub,cnode %d", cnode);
			per_hub_init(cnode);
		}
#endif
	}
#if 0
	cpu_io_setup();
	init_mfhi_war();
#endif
	smp_num_cpus = num_cpus;
}

#else /* CONFIG_SMP */
void cboot(void) {}
#endif /* CONFIG_SMP */