/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                     Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>
/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;
#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2
struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};
static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};
struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
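
/*
 * Message passing: each core owns a spinlock-protected FIFO of
 * struct ipi_message.  A sender allocates a message, appends it to the
 * target core's queue under the queue lock, and raises a hardware IPI
 * via platform_send_ipi_cpu(); the target drains the list in
 * ipi_handler() below.
 */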
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;

	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, msg->call_struct.waitmask);
	} else
		kfree(msg);
}
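
/*
 * Message lifetime: for a waited-on call the sender spins until
 * waitmask is empty and then frees the message itself; for a
 * fire-and-forget call (wait == 0) the receiving core frees it in
 * ipi_call_function() above.
 */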
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu);

	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;

	spin_lock(&msg_queue->lock);
	while (!list_empty(&msg_queue->head)) {
		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
		list_del(&msg->list);
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one; leave it to
			 * return_from_int. */
			kfree(msg);
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock(&msg_queue->lock);
			ipi_call_function(cpu, msg);
			spin_lock(&msg_queue->lock);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock(&msg_queue->lock);
			ipi_cpu_stop(cpu);
			spin_lock(&msg_queue->lock);
			kfree(msg);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
		}
	}
	spin_unlock(&msg_queue->lock);
	return IRQ_HANDLED;
}
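
/*
 * Note that the queue lock is dropped around ipi_call_function() and
 * ipi_cpu_stop(): the callback may run for a long time (and the stop
 * handler never returns), and holding the per-queue spinlock across it
 * would block senders targeting this core.
 */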
static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;

	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		INIT_LIST_HEAD(&msg_queue->head);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
	}
}
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}

	if (wait) {
		/* Spin until every target core has cleared its waitmask
		 * bit, invalidating the line so we re-read from memory. */
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);
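
/*
 * Usage sketch (illustrative only): run a function on every other
 * online core and wait for all of them to finish:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc(info);
 *	}
 *
 *	smp_call_function(bump, &hits, 1);
 */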
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
				int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_RESCHEDULE;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);
}
void smp_send_stop(void)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_CPU_STOP;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
}
int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret;
	static struct task_struct *idle;

	if (idle)
		free_task(idle);

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}
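
/*
 * secondary_stack hands the new idle thread's stack to the secondary
 * core's early boot code, which is expected to switch to it before
 * entering secondary_start_kernel(); it is reset to NULL once
 * platform_boot_secondary() returns.
 */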
static void __cpuinit setup_secondary(unsigned int cpu)
{
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU.  */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
}
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE "   DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE "   ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	/* setup local core timer */
	bfin_local_timer_setup();

	local_irq_enable();

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}
void __init smp_prepare_boot_cpu(void)
{
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(ipi_handler);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
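
/*
 * This runs after new code has been written to memory: the cores have
 * no hardware cache coherence, so each other core is asked (via the
 * BFIN_IPI_CALL_FUNC path above) to flush the affected range from its
 * own I-cache so it does not execute stale instructions.
 */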
#ifdef __ARCH_SYNC_CORE_ICACHE
unsigned long icache_invld_count[NR_CPUS];
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	icache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long dcache_invld_count[NR_CPUS];
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	dcache_invld_count[cpu]++;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif
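
/*
 * icache_invld_count[] and dcache_invld_count[] above keep a per-core
 * count of these whole-cache invalidations, a rough measure of what
 * the software coherence scheme costs at run time.
 */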
#ifdef CONFIG_HOTPLUG_CPU
int __cpuexit __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EPERM;

	set_cpu_online(cpu, false);
	return 0;
}

static DECLARE_COMPLETION(cpu_killed);

int __cpuexit __cpu_die(unsigned int cpu)
{
	return wait_for_completion_timeout(&cpu_killed, 5000);
}

void cpu_die(void)
{
	complete(&cpu_killed);

	atomic_dec(&init_mm.mm_users);
	atomic_dec(&init_mm.mm_count);

	local_irq_disable();
	platform_cpu_die();
}
#endif