#include <linux/config.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_SGI_IP27

#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/sn0/ip27.h>
#define DORESCHED       0xab
#define DOCALL          0xbc

#define IRQ_TO_SWLEVEL(i)       ((i) + 7)       /* Delete this from here */
/* Send an inter-processor interrupt of the given type to one CPU.  */
static void sendintr(int destid, unsigned char status)
{
        int irq;

#if (CPUS_PER_NODE == 2)
        switch (status) {
        case DORESCHED: irq = CPU_RESCHED_A_IRQ; break;
        case DOCALL:    irq = CPU_CALL_A_IRQ; break;
        default:        panic("sendintr");
        }
        irq += cputoslice(destid);

        /*
         * Convert the compact hub number to the NASID to get the correct
         * part of the address space.  Then set the interrupt bit associated
         * with the CPU we want to send the interrupt to.
         */
        REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cputocnode(destid)),
                             IRQ_TO_SWLEVEL(irq));
#else
        << Bomb!  Must redefine this for more than 2 CPUS. >>
#endif
}

#endif /* CONFIG_SGI_IP27 */
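/*
 * Illustrative sketch, kept out of the build: with CPUS_PER_NODE == 2,
 * sendintr() above relies on the "B" flavour of each per-CPU IRQ sitting
 * directly after the "A" flavour, so adding cputoslice(destid) (0 or 1)
 * selects the right target.  This is an assumption spelled out for
 * clarity, not code from the file itself.
 */
#if 0
/* e.g. a DORESCHED aimed at the second CPU (slice 1) of its node: */
int example_irq = CPU_RESCHED_A_IRQ + 1;        /* presumably CPU_RESCHED_B_IRQ */
#endif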
/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
int smp_threads_ready;                  /* Not used */
atomic_t smp_commenced = ATOMIC_INIT(0);
struct cpuinfo_mips cpu_data[NR_CPUS];
int smp_num_cpus;                       /* Number that came online.  */
int __cpu_number_map[NR_CPUS];
int __cpu_logical_map[NR_CPUS];
cycles_t cacheflush_time;
static void smp_tune_scheduling(void)
{
        /* Nothing to tune here yet.  */
}

void __init smp_boot_cpus(void)
{
        extern void allowboot(void);

        init_new_context(current, &init_mm);
        current->processor = 0;
        smp_tune_scheduling();
        allowboot();                    /* Let the other CPUs come up.  */
}

void __init smp_commence(void)
{
        wmb();                          /* Make prior setup visible first.  */
        atomic_set(&smp_commenced, 1);
}
static void stop_this_cpu(void *dummy)
{
        /* There is no way to take a CPU offline here; just spin forever.  */
        for (;;);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything.  Worst case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
        sendintr(cpu, DORESCHED);
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
/*
 * Run a function on all other CPUs.
 *  <func>      The function to run.  This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until the function has completed on the
 *              other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute
 * <func>, or have already executed it.
 */
static volatile struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;
int smp_call_function(void (*func) (void *info), void *info, int retry,
                      int wait)
{
        struct call_data_struct data;
        int i, cpus = smp_num_cpus - 1;
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;

        if (cpus == 0)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&lock);
        call_data = &data;

        /* Send a message to all other CPUs and wait for them to respond */
        for (i = 0; i < smp_num_cpus; i++)
                if (smp_processor_id() != i)
                        sendintr(i, DOCALL);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock_bh(&lock);

        return 0;
}
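/*
 * Illustrative usage sketch, kept out of the build: a caller hands
 * smp_call_function() a fast, non-blocking hook plus an info pointer and
 * then does its own local share of the work, exactly as the TLB flush
 * helpers below do.  The names "example_count_ipi" and "example_broadcast"
 * are made up for this sketch.
 */
#if 0
static void example_count_ipi(void *info)
{
        atomic_inc((atomic_t *) info);          /* runs on every other CPU */
}

static void example_broadcast(void)
{
        static atomic_t hits = ATOMIC_INIT(0);

        /* retry = 1, wait = 1: block until all other CPUs have run the hook */
        smp_call_function(example_count_ipi, &hits, 1, 1);
        atomic_inc(&hits);                      /* and count this CPU too */
}
#endif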
void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify the initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless
         * wait == 1.
         */
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}
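/*
 * Illustrative sketch, kept out of the build: the platform's IP27
 * interrupt code is expected to route the DOCALL IPI to the handler
 * above, roughly like this.  The function name below is made up.
 */
#if 0
static void example_docall_intr(int irq, void *dev_id, struct pt_regs *regs)
{
        smp_call_function_interrupt();          /* run the pending cross-call */
}
#endif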
static void flush_tlb_all_ipi(void *info)
{
        _flush_tlb_all();
}

void flush_tlb_all(void)
{
        smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
        _flush_tlb_all();
}

static void flush_tlb_mm_ipi(void *mm)
{
        _flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        _flush_tlb_mm(mm);
}
struct flush_tlb_data {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        _flush_tlb_range(fd->mm, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.mm = mm;
        fd.addr1 = start;
        fd.addr2 = end;
        smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        _flush_tlb_range(mm, start, end);
}
static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        _flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct flush_tlb_data fd;

        fd.vma = vma;
        fd.addr1 = page;
        smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        _flush_tlb_page(vma, page);
}
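/*
 * Illustrative usage sketch, kept out of the build: a caller that has
 * just torn down the mapping of a single page would invoke the broadcast
 * flush like this.  The function name below is made up.
 */
#if 0
static void example_unmap_one_page(struct vm_area_struct *vma, unsigned long address)
{
        /* ...the page table entry has already been cleared by the caller... */
        flush_tlb_page(vma, address);   /* IPI every other CPU, then flush locally */
}
#endif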