#include <asm/delay.h>
#include <asm/arch/irq.h>
#include <asm/arch/hwregs/intr_vect.h>
#include <asm/arch/hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/arch/hwregs/mmu_defs_asm.h>
#include <asm/arch/hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4

#define FLUSH_ALL (void*)0xffffffff
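
/* The IPI_* values are individual bits in the hardware ipi.vector
 * field, so several requests can be pending at once. FLUSH_ALL is a
 * sentinel pointer that flush_tlb_common() compares against its mm
 * and vma arguments to mean "flush everything". */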

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED };
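
/* A sketch of the intended use, assumed from the declaration above
 * (the actual users live in the arch atomic/bitops headers): an atomic
 * operation hashes its target address onto one of these LOCK_COUNT
 * locks and holds that lock across its read-modify-write, since the
 * CPU has no atomic RMW instructions of its own. */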

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;
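
/* Boot handshake: smp_boot_one_cpu() publishes the booting CPU's id
 * and idle thread here, and the secondary reads them during its early
 * startup. Both are volatile because they are written on one CPU and
 * polled on another without locking. */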

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct *call_data;
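
/* call_data points at a call_data_struct on the caller's stack. It is
 * published under call_lock in smp_call_function(), so only one
 * cross-call can be in flight at a time. */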

static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;
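
/* Arguments of a pending IPI_FLUSH_TLB, published under tlbstate_lock
 * by flush_tlb_common() and consumed by the IPI handler on the other
 * CPUs. */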

extern int setup_irq(int, struct irqaction *);

/* Interrupt vector register blocks, one per CPU. */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id,
					 struct pt_regs *regs);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
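
/* irq_ipi uses the old-style struct irqaction initializer: handler,
 * flags, mask, name, dev_id, next. IRQF_DISABLED makes the IPI handler
 * run with interrupts disabled. */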
static struct irqaction irq_ipi = { crisv32_ipi_interrupt, IRQF_DISABLED,
				    CPU_MASK_NONE, "ipi", NULL, NULL };

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs, so set them up. */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present. */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
	/* The PGD pointer has moved after the per_cpu initialization, so
	 * update the MMU: write the new PGD into both support register
	 * banks. */
	pgd_t **pgd;
	pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	cpu_set(0, cpu_online_map);
	cpu_set(0, phys_cpu_present_map);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one CPU online. */
static int __init smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Wait for the CPU to come online. */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}

/* Secondary CPUs start executing C code here. We need to set up CPU
 * specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up the MMU. */
	cris_mmu_init();
	__flush_tlb_all();

	/* Set up the local timer. */
	cris_timer_init();

	/* Enable IRQs and idle. */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	unmask_irq(IPI_INTR_VECT);
	unmask_irq(TIMER_INTR_VECT);
	preempt_disable();
	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	asm volatile("halt");
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty for moving
 * a process to another CPU. Our cache is rather small, so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __devinit __cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}
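
/* smp_send_reschedule() only posts IPI_SCHEDULE; the interrupt itself
 * makes the target CPU revisit its scheduling decision on return, so
 * crisv32_ipi_interrupt() needs no explicit case for it. */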

void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask = CPU_MASK_NONE;
	cpu_set(cpu, cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * A flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has been executing on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? CPU_MASK_ALL : mm->cpu_vm_mask);
	cpu_clear(smp_processor_id(), cpu_mask);

	/* Publish the flush arguments for the IPI handlers. */
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;

	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}
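
/* The flush entry points below share one pattern: do the local flush
 * directly, then let flush_tlb_common() forward the request to the
 * other CPUs in mm->cpu_vm_mask. */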

void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);

	/* No more mappings in other CPUs. */
	cpus_clear(mm->cpu_vm_mask);
	cpu_set(smp_processor_id(), mm->cpu_vm_mask);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter-processor interrupts
 *
 * The IPIs are used for:
 *   * Forcing a schedule on a CPU
 *   * Flushing the TLB on other CPUs
 *   * Calling a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i;
	int ret = 0;
	reg_intr_vect_rw_ipi ipi;

	/* Calculate the CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Post the vector bit in each target CPU's IPI register. */
	for_each_cpu_mask(i, cpu_mask) {
		ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for the IPI to finish on the other CPUs. */
	if (wait) {
		for_each_cpu_mask(i, cpu_mask) {
			int j;

			/* The handler clears the vector when it is done. */
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk("SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}

	return ret;
}

/*
 * You must not call this function with interrupts disabled, or from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	cpumask_t cpu_mask = CPU_MASK_ALL;
	struct call_data_struct data;
	int ret;

	cpu_clear(smp_processor_id(), cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
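
/* The IPI entry point: runs on the target CPU, dispatches on the bits
 * set in the ipi.vector register field and acknowledges the IPI by
 * clearing it. */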
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_CALL) {
		/* Read call_data only for an actual cross-call; a bare
		 * IPI_SCHEDULE may arrive before it has ever been set. */
		void (*func)(void *info) = call_data->func;
		void *info = call_data->info;

		func(info);
	}

	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	/* Acknowledge the IPI; this is what the send_ipi() wait loop polls for. */
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}