Import 2.3.18pre1
[davej-history.git] / arch / ppc / kernel / smp.c
blob3d2fb057f22f5d2936fdd671104cc81a79a569dc
1 /*
2 * $Id: smp.c,v 1.62 1999/09/05 11:56:34 paulus Exp $
4 * Smp support for ppc.
6 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
7 * deal of code from the sparc and intel versions.
9 * Support for PReP (Motorola MTX/MVME) SMP by Troy Benjegerdes
10 * (troy@microux.com, hozer@drgw.net)
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/smp.h>
16 #include <linux/smp_lock.h>
17 #include <linux/interrupt.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/delay.h>
20 #define __KERNEL_SYSCALLS__
21 #include <linux/unistd.h>
22 #include <linux/init.h>
23 #include <linux/openpic.h>
24 #include <linux/spinlock.h>
26 #include <asm/ptrace.h>
27 #include <asm/atomic.h>
28 #include <asm/irq.h>
29 #include <asm/page.h>
30 #include <asm/pgtable.h>
31 #include <asm/hardirq.h>
32 #include <asm/softirq.h>
33 #include <asm/init.h>
34 #include <asm/io.h>
35 #include <asm/prom.h>
36 #include <asm/smp.h>
38 #include "time.h"
/* Set once the boot CPU has entered smp_boot_cpus(); tells the other
 * processors to skip one-time initialisation. */
39 int first_cpu_booted = 0;
40 int smp_threads_ready = 0;
/* Set by smp_commence(); secondaries spin on this in smp_callin(). */
41 volatile int smp_commenced = 0;
/* Number of CPUs that actually came up (incremented in smp_boot_cpus). */
42 int smp_num_cpus = 1;
/* Per-CPU info filled in by smp_store_cpu_info(). */
43 struct cpuinfo_PPC cpu_data[NR_CPUS];
44 struct klock_info_struct klock_info = { KLOCK_CLEAR, 0 };
45 volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
/* Count of IPI messages handled; bumped in smp_message_recv(). */
46 volatile unsigned long ipi_count;
47 spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
/* Profiling tick divisors, per CPU (see smp_local_timer_interrupt). */
48 unsigned int prof_multiplier[NR_CPUS];
49 unsigned int prof_counter[NR_CPUS];
/* Rough cache-flush cost estimate; set in smp_boot_cpus(). */
50 cycles_t cacheflush_time;
52 /* all cpu mappings are 1-1 -- Cort */
53 int cpu_number_map[NR_CPUS] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,};
/* Non-zero once the corresponding CPU has checked in via smp_callin(). */
54 volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
56 int start_secondary(void *);
57 extern int cpu_idle(void *unused);
58 u_int openpic_read(volatile u_int *addr);
60 /* register for interrupting the secondary processor on the powersurge */
61 #define PSURGE_INTR ((volatile unsigned *)0xf80000c0)
63 void smp_local_timer_interrupt(struct pt_regs * regs)
65 int cpu = smp_processor_id();
66 extern void update_one_process(struct task_struct *,unsigned long,
67 unsigned long,unsigned long,int);
68 if (!--prof_counter[cpu]) {
69 int user=0,system=0;
70 struct task_struct * p = current;
73 * After doing the above, we need to make like
74 * a normal interrupt - otherwise timer interrupts
75 * ignore the global interrupt lock, which is the
76 * WrongThing (tm) to do.
79 if (user_mode(regs))
80 user=1;
81 else
82 system=1;
84 if (p->pid) {
85 update_one_process(p, 1, user, system, cpu);
87 p->counter -= 1;
88 if (p->counter <= 0) {
89 p->counter = 0;
90 current->need_resched = 1;
92 if (p->priority < DEF_PRIORITY) {
93 kstat.cpu_nice += user;
94 kstat.per_cpu_nice[cpu] += user;
95 } else {
96 kstat.cpu_user += user;
97 kstat.per_cpu_user[cpu] += user;
100 kstat.cpu_system += system;
101 kstat.per_cpu_system[cpu] += system;
104 prof_counter[cpu]=prof_multiplier[cpu];
109 * Dirty hack to get smp message passing working.
111 * As it is now, if we're sending two messages at the same time
112 * we have race conditions. The PowerSurge doesn't easily
113 * allow us to send IPI messages so we put the messages in
114 * smp_message[].
116 * This is because we don't have several IPIs on the PowerSurge even though
117 * we do on the chrp. It would be nice to use the actual IPI's on the chrp
118 * rather than this but having two methods of doing IPI isn't a good idea
119 * right now.
120 * -- Cort
122 int smp_message[NR_CPUS];
123 void smp_message_recv(void)
125 int msg = smp_message[smp_processor_id()];
127 if ( _machine == _MACH_Pmac )
129 /* clear interrupt */
130 out_be32(PSURGE_INTR, ~0);
133 /* make sure msg is for us */
134 if ( msg == -1 ) return;
136 ipi_count++;
138 switch( msg )
140 case MSG_STOP_CPU:
141 __cli();
142 while (1) ;
143 break;
144 case MSG_RESCHEDULE:
145 current->need_resched = 1;
146 break;
147 case 0xf0f0: /* syncing time bases - just return */
148 break;
149 default:
150 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
151 smp_processor_id(), msg);
152 break;
154 /* reset message */
155 smp_message[smp_processor_id()] = -1;
void smp_send_reschedule(int cpu)
{
	/*
	 * Deliberately a no-op: this is only called when `cpu' is
	 * running its idle task, which will reschedule by itself,
	 * so no MSG_RESCHEDULE IPI is sent.
	 */
}
165 void smp_send_stop(void)
167 smp_message_pass(MSG_ALL_BUT_SELF, MSG_STOP_CPU, 0, 0);
170 spinlock_t mesg_pass_lock = SPIN_LOCK_UNLOCKED;
171 void smp_message_pass(int target, int msg, unsigned long data, int wait)
173 int i;
174 if ( !(_machine & (_MACH_Pmac|_MACH_chrp)) )
175 return;
177 spin_lock(&mesg_pass_lock);
180 * We assume here that the msg is not -1. If it is,
181 * the recipient won't know the message was destined
182 * for it. -- Cort
185 switch( target )
187 case MSG_ALL:
188 smp_message[smp_processor_id()] = msg;
189 /* fall through */
190 case MSG_ALL_BUT_SELF:
191 for ( i = 0 ; i < smp_num_cpus ; i++ )
192 if ( i != smp_processor_id () )
193 smp_message[i] = msg;
194 break;
195 default:
196 smp_message[target] = msg;
197 break;
200 if ( _machine == _MACH_Pmac )
202 /* interrupt secondary processor */
203 out_be32(PSURGE_INTR, ~0);
204 out_be32(PSURGE_INTR, 0);
206 * Assume for now that the secondary doesn't send
207 * IPI's -- Cort
209 /* interrupt primary */
210 /**(volatile unsigned long *)(0xf3019000);*/
213 if ( _machine == _MACH_chrp )
216 * There has to be some way of doing this better -
217 * perhaps a sent-to-all or send-to-all-but-self
218 * in the openpic. This gets us going for now, though.
219 * -- Cort
221 switch ( target )
223 case MSG_ALL:
224 for ( i = 0 ; i < smp_num_cpus ; i++ )
225 openpic_cause_IPI(i, 0, 0xffffffff );
226 break;
227 case MSG_ALL_BUT_SELF:
228 for ( i = 0 ; i < smp_num_cpus ; i++ )
229 if ( i != smp_processor_id () )
230 openpic_cause_IPI(i, 0,
231 0xffffffff & ~(1 << smp_processor_id()));
232 break;
233 default:
234 openpic_cause_IPI(target, 0, 1U << target);
235 break;
239 spin_unlock(&mesg_pass_lock);
/*
 * Bring up the secondary CPUs from the boot processor.
 *
 * Records CPU 0 as the master, forks an idle task for each additional
 * CPU, flushes the first 8MB of kernel memory to RAM (the secondaries'
 * BATs aren't set up yet), kicks each secondary with a platform-specific
 * wakeup, then polls cpu_callin_map[] to see whether it came up.
 */
242 void __init smp_boot_cpus(void)
244 extern struct task_struct *current_set[NR_CPUS];
245 extern unsigned long smp_chrp_cpu_nr;
246 extern void __secondary_start_psurge(void);
247 extern void __secondary_start_chrp(void);
248 int i, cpu_nr;
249 struct task_struct *p;
250 unsigned long a;
252 printk("Entering SMP Mode...\n");
253 /* let other processors know to not do certain initialization */
254 first_cpu_booted = 1;
255 smp_num_cpus = 1;
256 smp_store_cpu_info(0);
259 * assume for now that the first cpu booted is
260 * cpu 0, the master -- Cort
262 cpu_callin_map[0] = 1;
263 active_kernel_processor = 0;
264 current->processor = 0;
266 init_idle();
/* Start every CPU's profiling countdown at one tick per event. */
268 for (i = 0; i < NR_CPUS; i++) {
269 prof_counter[i] = 1;
270 prof_multiplier[i] = 1;
274 * XXX very rough, assumes 20 bus cycles to read a cache line,
275 * timebase increments every 4 bus cycles, 32kB L1 data cache.
277 cacheflush_time = 5 * 1024;
279 if ( !(_machine & (_MACH_Pmac|_MACH_chrp)) )
281 printk("SMP not supported on this machine.\n");
282 return;
/* Decide how many CPUs to try to start on this platform. */
285 switch ( _machine )
287 case _MACH_Pmac:
288 /* assume powersurge board - 2 processors -- Cort */
289 cpu_nr = 2;
290 break;
291 case _MACH_chrp:
292 /* openpic doesn't report # of cpus, just # possible -- Cort */
293 #if 0
294 cpu_nr = ((openpic_read(&OpenPIC->Global.Feature_Reporting0)
295 & OPENPIC_FEATURE_LAST_PROCESSOR_MASK) >>
296 OPENPIC_FEATURE_LAST_PROCESSOR_SHIFT)+1;
297 #endif
298 cpu_nr = smp_chrp_cpu_nr;
299 break;
303 * only check for cpus we know exist. We keep the callin map
304 * with cpus at the bottom -- Cort
306 for ( i = 1 ; i < cpu_nr; i++ )
308 int c;
309 struct pt_regs regs;
/* NOTE(review): `idle' appears unused in this loop -- confirm. */
310 struct task_struct *idle;
312 /* create a process for the processor */
313 /* we don't care about the values in regs since we'll
314 never reschedule the forked task. */
315 if (do_fork(CLONE_VM|CLONE_PID, 0, &regs) < 0)
316 panic("failed fork for CPU %d", i);
/* The freshly forked child is the most recently added task. */
317 p = init_task.prev_task;
318 if (!p)
319 panic("No idle task for CPU %d", i);
/* Turn it into CPU i's idle task: never scheduled normally. */
320 del_from_runqueue(p);
321 unhash_process(p);
322 init_tasks[i] = p;
324 p->processor = i;
325 p->has_cpu = 1;
326 current_set[i] = p;
328 /* need to flush here since secondary bats aren't setup */
329 for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
330 asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
331 asm volatile("sync");
333 /* wake up cpus */
334 switch ( _machine )
336 case _MACH_Pmac:
337 /* setup entry point of secondary processor */
338 *(volatile unsigned long *)(0xf2800000) =
339 (unsigned long)__secondary_start_psurge-KERNELBASE;
340 eieio();
341 /* interrupt secondary to begin executing code */
342 out_be32(PSURGE_INTR, ~0);
343 udelay(1);
344 out_be32(PSURGE_INTR, 0);
345 break;
346 case _MACH_chrp:
/* Publish the CPU number at KERNELBASE for the spinning secondary. */
347 *(unsigned long *)KERNELBASE = i;
348 asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
349 #if 0
350 device = find_type_devices("cpu");
351 /* assume cpu device list is in order, find the ith cpu */
352 for ( a = i; device && a; device = device->next, a-- )
354 if ( !device )
355 break;
356 printk( "Starting %s (%lu): ", device->full_name,
357 *(ulong *)get_property(device, "reg", NULL) );
358 call_rtas( "start-cpu", 3, 1, NULL,
359 *(ulong *)get_property(device, "reg", NULL),
360 __pa(__secondary_start_chrp), i);
361 #endif
362 break;
366 * wait to see if the cpu made a callin (is actually up).
367 * use this value that I found through experimentation.
368 * -- Cort
/* Poll up to ~100ms total (1000 iterations of 100us). */
370 for ( c = 1000; c && !cpu_callin_map[i] ; c-- )
371 udelay(100);
373 if ( cpu_callin_map[i] )
375 printk("Processor %d found.\n", i);
376 /* this sync's the decr's -- Cort */
377 if ( _machine == _MACH_Pmac )
378 set_dec(decrementer_count);
379 smp_num_cpus++;
380 } else {
381 printk("Processor %d is stuck.\n", i);
385 if ( _machine == _MACH_Pmac )
387 /* reset the entry point so if we get another intr we won't
388 * try to startup again */
389 *(volatile unsigned long *)(0xf2800000) = 0x100;
390 /* send interrupt to other processors to start decr's on all cpus */
/* 0xf0f0 is the "sync time bases" message handled in smp_message_recv(). */
391 smp_message_pass(1,0xf0f0, 0, 0);
395 void __init smp_commence(void)
398 * Lets the callin's below out of their loop.
400 wmb();
401 smp_commenced = 1;
404 /* intel needs this */
405 void __init initialize_secondary(void)
409 /* Activate a secondary processor. */
/*
 * First C code run by a secondary CPU: bump init_mm's reference count
 * and adopt it as the active address space, check in with the boot CPU
 * via smp_callin(), then drop into the idle loop for good.
 */
410 int __init start_secondary(void *unused)
412 atomic_inc(&init_mm.mm_count);
413 current->active_mm = &init_mm;
414 smp_callin();
415 return cpu_idle(NULL);
/*
 * Per-secondary check-in: record this CPU's info, start its
 * decrementer, flag it alive in cpu_callin_map[], then spin until the
 * boot CPU runs smp_commence() before enabling interrupts.
 */
418 void __init smp_callin(void)
420 smp_store_cpu_info(current->processor);
421 set_dec(decrementer_count);
423 #if 0
424 current->mm->mmap->vm_page_prot = PAGE_SHARED;
425 current->mm->mmap->vm_start = PAGE_OFFSET;
426 current->mm->mmap->vm_end = init_mm.mmap->vm_end;
427 #endif
/* Tell smp_boot_cpus() we made it. */
428 cpu_callin_map[current->processor] = 1;
/* Wait for smp_commence() on the boot CPU. */
429 while(!smp_commenced)
430 barrier();
431 __sti();
/*
 * Boot-argument hook for SMP options.
 * NOTE(review): the body is not visible in this extract; it appears the
 * arguments are simply ignored on PPC -- confirm against the full file.
 */
434 void __init smp_setup(char *str, int *ints)
/*
 * Hook for changing the profiling tick multiplier.
 * NOTE(review): `multiplier' is ignored -- prof_multiplier[] is never
 * updated -- yet 0 (success) is returned.  Confirm whether run-time
 * multiplier changes are meant to be unsupported on PPC.
 */
438 int __init setup_profiling_timer(unsigned int multiplier)
440 return 0;
443 void __init smp_store_cpu_info(int id)
445 struct cpuinfo_PPC *c = &cpu_data[id];
447 /* assume bogomips are same for everything */
448 c->loops_per_sec = loops_per_sec;
449 c->pvr = _get_PVR();