/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001 Broadcom Corporation
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>

int smp_threads_ready;  /* Not used */

// static atomic_t cpus_booted = ATOMIC_INIT(0);
atomic_t cpus_booted = ATOMIC_INIT(0);

cpumask_t phys_cpu_present_map;         /* Bitmask of physically present CPUs */
cpumask_t cpu_online_map;               /* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
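
/*
 * __cpu_number_map maps a physical CPU id to the logical index the
 * kernel uses for it; __cpu_logical_map is the inverse mapping.  Both
 * are expected to be filled in by the platform SMP code as the
 * secondaries are brought up.
 */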

/* These are defined by the board-specific code. */

/*
 * Cause the function described by call_data to be executed on the passed
 * cpu.  When the function has finished, increment the finished field of
 * call_data.
 */
void core_send_ipi(int cpu, unsigned int action);

/*
 * Clear all undefined state in the cpu, set up sp and gp to the passed
 * values, and kick the cpu into smp_bootstrap().
 */
void prom_boot_secondary(int cpu, unsigned long sp, unsigned long gp);

/*
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed.
 */
void prom_init_secondary(void);

void prom_smp_finish(void);

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

void smp_tune_scheduling(void)
{
        struct cache_desc *cd = &current_cpu_data.scache;
        unsigned long cachesize;        /* kB   */
        unsigned long bandwidth = 350;  /* MB/s */
        unsigned long cpu_khz;

        /*
         * Crude estimate until we actually measure ...
         */
        cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

        /*
         * Rough estimation for SMP scheduling, this is the number of
         * cycles it takes for a fully memory-limited process to flush
         * the SMP-local cache.
         *
         * (For a P5 this pretty much means we will choose another idle
         * CPU almost always at wakeup time (this is due to the small
         * L1 cache), on PIIs it's around 50-100 usecs, depending on
         * the cache size.)
         */
        if (!cpu_khz) {
                /*
                 * This basically disables processor-affinity scheduling
                 * on SMP without a cycle counter.  Currently all SMP
                 * capable MIPS processors have a cycle counter.
                 */
                cacheflush_time = 0;
                return;
        }

        cachesize = (cd->linesz * cd->sets * cd->ways) >> 10;   /* bytes -> kB */
        cacheflush_time = (cpu_khz >> 10) * (cachesize << 10) / bandwidth;
        cache_decay_ticks = (long)cacheflush_time / cpu_khz * HZ / 1000;

        printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
               (long)cacheflush_time / (cpu_khz / 1000),
               ((long)cacheflush_time * 100 / (cpu_khz / 1000)) % 100);
        printk("task migration cache decay timeout: %ld msecs.\n",
               (cache_decay_ticks + 1) * 1000 / HZ);
}
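
/*
 * Worked example with illustrative numbers: on a 500 MHz core
 * (cpu_khz = 500000) with a 1 MB scache (cachesize = 1024 kB),
 *
 *      cacheflush_time = (500000 >> 10) * (1024 << 10) / 350
 *                      = 488 * 1048576 / 350 ~= 1.46 million cycles,
 *
 * i.e. roughly the 2.9 ms it takes to stream 1 MB at 350 MB/s.
 * cache_decay_ticks = 1462014 / 500000 * HZ / 1000 rounds down to 0
 * with HZ == 100, which the second printk then reports as 10 msecs.
 */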

void __init smp_callin(void)
{
#if 0
        calibrate_delay();
        smp_store_cpu_info(cpuid);
#endif
}

#ifndef CONFIG_SGI_IP27

/*
 * Hook for doing final board-specific setup after the generic smp setup
 * is done.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();

        prom_init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype.
         */
        pgd_current[cpu] = init_mm.pgd;
        cpu_data[cpu].udelay_val = loops_per_jiffy;
        prom_smp_finish();
        printk("Slave cpu booted successfully\n");
        CPUMASK_SETB(cpu_online_map, cpu);
        atomic_inc(&cpus_booted);

        /* This CPU is now up; sit in the idle loop forever. */
        cpu_idle();
}

#endif /* CONFIG_SGI_IP27 */

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything.  Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF);
}

static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run.  This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func) (void *info), void *info, int retry,
                      int wait)
{
        struct call_data_struct data;
        int i, cpus = num_online_cpus() - 1;
        int cpu = smp_processor_id();

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        call_data = &data;

        /* Send a message to all other CPUs and wait for them to respond */
        for (i = 0; i < NR_CPUS; i++)
                if (cpu_online(i) && i != cpu)
                        core_send_ipi(i, SMP_CALL_FUNCTION);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();

        spin_unlock(&call_lock);

        return 0;
}
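
/*
 * Illustrative usage sketch (the handler below is hypothetical, not
 * part of this file): broadcast a fast, non-blocking handler and wait
 * for every other CPU to finish running it.
 *
 *      static void dump_count(void *unused)
 *      {
 *              printk("CPU%d: Count=%u\n", smp_processor_id(),
 *                     read_c0_count());
 *      }
 *
 *      smp_call_function(dump_count, NULL, 1, 1);
 *      dump_count(NULL);       // the IPI deliberately skips the caller
 *
 * The handler runs in interrupt context on the remote CPUs, so it must
 * not sleep or take locks the caller might already hold.
 */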

void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        mb();
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        clear_bit(smp_processor_id(), &cpu_online_map);
        local_irq_enable();     /* May need to service _machine_restart IPI */
        for (;;);               /* Wait if available. */
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
                int i;

                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);
}
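
/*
 * Example of the strategy described above: a single-threaded mm that is
 * current on this CPU takes the else-branch, which only zeroes
 * cpu_context(i, mm) on the other CPUs; each of them will then allocate
 * a fresh ASID at its next switch_mm(), so no IPI is needed.  A
 * multithreaded mm, or one flushed on behalf of another task (as a
 * debugger does), takes the smp_call_function() path so that every CPU
 * runs local_flush_tlb_mm() immediately.
 */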

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;

                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
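
/*
 * Kernel mappings are shared by all CPUs, so unlike the user-space
 * flushes there is no single-threaded fast path here: every online CPU
 * has to flush the range from its own TLB.
 */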
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        smp_call_function(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
        local_flush_tlb_kernel_range(start, end);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;

                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
        local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);