/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;
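
/*
 * call_data points at the caller's on-stack call_data_struct for the
 * duration of one smp_call_function*() invocation; call_lock serializes
 * concurrent callers, so the pointer stays valid until every target cpu
 * has checked in via the started/finished counters below.
 */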
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}
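
/*
 * Note the rendezvous protocol: func/info/wait are copied into locals
 * before started is incremented, so once the sender has seen all cpus
 * in started it may safely tear down its on-stack call_data_struct if
 * it did not ask to wait; waiters additionally spin on finished.
 */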
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
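
/*
 * The helpers below implement the shutdown path: do_send_stop() stops
 * every other cpu with a sigp order, do_store_status() makes each of
 * them dump its register state into its lowcore. Both retry while the
 * target answers sigp_busy, since a sigp order can be rejected
 * transiently.
 */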
static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}
static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	do_reipl();
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}
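
/*
 * Park the current cpu: mask the external and I/O interrupt subclasses
 * in the control registers, then loop in enabled wait state. Used by
 * the halt/power-off paths below for the cpus that are not executing
 * the final cpcmd/sigp sequence.
 */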
static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;	/* clear external interrupt subclass mask */
	cr[6] = 0;		/* clear I/O interrupt subclass mask */
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}
static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
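
/*
 * The two bitcall routines below deliver the signals handled by
 * do_ext_call_interrupt() above: they set the request bit in the target
 * cpu's lowcore and then raise the 0x1201 emergency-signal external
 * interrupt on it (registered in smp_prepare_cpus()).
 */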
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;
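
/*
 * Each callback or-s orvals[] into and and-s andvals[] against the
 * selected range of control registers. To set a single bit, pass
 * orvals[cr] = 1 << bit with andvals[cr] = -1L; to clear it, pass
 * orvals[cr] = 0 with andvals[cr] = ~(1L << bit), exactly as
 * smp_ctl_set_bit() and smp_ctl_clear_bit() below do.
 */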
/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}
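
/*
 * In both routines above the remote cpus run smp_ctl_bit_callback()
 * via the call-function IPI while the local cpu takes the cheaper
 * direct __ctl_set_bit()/__ctl_clear_bit() path, so the control
 * register ends up consistent machine-wide.
 */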
/*
 * Let's check how many CPUs we have.
 */

static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}
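
/*
 * Note the probing trick above: logical cpu 1 is used as a scratch
 * slot in __cpu_logical_map so that every possible physical cpu
 * address can be probed with sigp_sense; addresses that answer
 * sigp_not_operational are skipped, everything else is counted.
 */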
/*
 *	Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
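
/*
 * cpu_stopped() senses the physical cpu behind a logical cpu number;
 * bit 0x40 of the returned status word is the "stopped" indication.
 */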
static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore    *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode          ccode;
	int                 curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
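
/*
 * additional_cpus/possible_cpus come from the early kernel parameters
 * of the same name (see the early_param handlers below) and bound the
 * possible map set up in smp_setup_cpu_possible_map().
 */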
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}
#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
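
/*
 * __cpu_disable() detaches the current cpu on the hot-unplug path: it
 * refuses while the cpu is still reserved via smp_get_cpu(), takes the
 * cpu out of the online map and then shuts off external interrupts,
 * I/O interrupts and most machine checks through the control registers.
 */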
int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	/* disable all external interrupts */

	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */

	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */

	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}
void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
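
/*
 * smp_prepare_cpus() below allocates a prefix page (lowcore), async
 * stack and panic stack for every possible cpu, copies the boot cpu's
 * lowcore as a template, and creates the idle tasks that __cpu_up()
 * will later hand to the secondaries.
 */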
/*
 *	Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_possible_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);