/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                     Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2
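
/*
 * How the types are delivered (summary of the code below):
 * BFIN_IPI_RESCHEDULE is signalled directly on IRQ_SUPPLE_0 and carries
 * no payload, while BFIN_IPI_CALL_FUNC and BFIN_IPI_CPU_STOP are queued
 * as ipi_message entries and dispatched from the IRQ_SUPPLE_1 handler.
 */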

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t *waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	unsigned long type;
	struct smp_call_struct call_struct;
};

/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5

/* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue {
	spinlock_t lock;
	unsigned long count;
	unsigned long head; /* head of the queue */
	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
};
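
/*
 * The queue is a ring buffer: the consumer reads at 'head', the producer
 * appends at (head + count) % BFIN_IPI_MSGQ_LEN, so no tail index is
 * needed. For example, with head = 4 and count = 2 the next free slot
 * wraps around to index 1.
 */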

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}

static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, *msg->call_struct.waitmask);
	}
}

/* Use IRQ_SUPPLE_0 to request reschedule.
 * When returning from interrupt to user space,
 * there is a chance to reschedule */
static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
{
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
	return IRQ_HANDLED;
}

static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	platform_clear_ipi(cpu, IRQ_SUPPLE_1);

	msg_queue = &__get_cpu_var(ipi_msg_queue);

	spin_lock_irqsave(&msg_queue->lock, flags);

	while (msg_queue->count) {
		msg = &msg_queue->ipi_message[msg_queue->head];
		switch (msg->type) {
		case BFIN_IPI_CALL_FUNC:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_call_function(cpu, msg);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock_irqrestore(&msg_queue->lock, flags);
			ipi_cpu_stop(cpu);
			spin_lock_irqsave(&msg_queue->lock, flags);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			break;
		}
		msg_queue->head++;
		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
		msg_queue->count--;
	}
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	return IRQ_HANDLED;
}
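
/*
 * Note: the queue lock is dropped around ipi_call_function() and
 * ipi_cpu_stop() above so that the called function can itself send IPIs
 * without deadlocking on this CPU's message queue.
 */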

static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
		msg_queue->head = 0;
	}
}

static inline void smp_send_message(cpumask_t callmap, unsigned long type,
					void (*func) (void *info), void *info,
					int wait)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;
	unsigned long flags, next_msg;
	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
			next_msg = (msg_queue->head + msg_queue->count)
					% BFIN_IPI_MSGQ_LEN;
			msg = &msg_queue->ipi_message[next_msg];
			msg->type = type;
			if (type == BFIN_IPI_CALL_FUNC) {
				msg->call_struct.func = func;
				msg->call_struct.info = info;
				msg->call_struct.wait = wait;
				msg->call_struct.waitmask = &waitmask;
			}
			msg_queue->count++;
		} else
			panic("IPI message queue overflow\n");
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
	}

	if (wait) {
		while (!cpus_empty(waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&waitmask),
				(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
	}
}
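
/*
 * Note on the wait handshake above: each receiver clears its own bit in
 * the sender's on-stack waitmask (see ipi_call_function()), and since the
 * cores are not hardware cache coherent, the sender spins while
 * re-invalidating its cached copy of waitmask so the remote stores become
 * visible.
 */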

int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t callmap;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
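
/*
 * Hypothetical usage sketch (not part of this file): run a function on
 * every other online core and wait until all of them have finished:
 *
 *	static void drop_icache(void *unused)
 *	{
 *		blackfin_invalidate_entire_icache();
 *	}
 *
 *	smp_call_function(drop_icache, NULL, 1);
 */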

int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);

	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

void smp_send_reschedule(int cpu)
{
	/* simply trigger an ipi */
	if (cpu_is_offline(cpu))
		return;
	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
}

void smp_send_stop(void)
{
	cpumask_t callmap;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}
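
/*
 * Boot handoff (as far as this file shows): the boot core publishes the
 * top of the idle task's stack in the global 'secondary_stack',
 * platform_boot_secondary() releases the secondary core, which presumably
 * picks the stack up in its startup code before entering
 * secondary_start_kernel(); clearing the pointer afterwards keeps a stale
 * value from being reused on a later boot attempt.
 */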

static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}

void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
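
/*
 * Typical use (an assumption based on this function's role): after one
 * core writes instructions to memory,
 *
 *	smp_icache_flush_range_others(addr, addr + len);
 *
 * makes the other cores discard stale I-cache lines for that range; the
 * calling core must still flush its own I-cache separately.
 */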

#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif

#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif