/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
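/*
 * Added note (not in the original file): the logical-to-physical
 * translation is a plain array lookup, e.g.
 *
 *	u16 phys_addr = __cpu_logical_map[cpu];
 *
 * The sigp wrappers perform this lookup internally, so all callers in
 * this file can stay with logical cpu numbers.
 */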
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
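/*
 * Added note (not in the original file): data.started and data.finished
 * implement a two-phase handshake.  Each target cpu marks itself in
 * "started" as soon as the external call interrupt arrives, runs func,
 * and then marks itself in "finished".  The sender spins on cpus_equal()
 * for each phase, which is why func must be fast and non-blocking.
 */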
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
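/*
 * Usage sketch (added; my_helper is hypothetical): run a short helper on
 * all other online cpus and wait for it to complete everywhere.
 *
 *	static void my_helper(void *info)
 *	{
 *		...	- something fast and non-blocking
 *	}
 *
 *	smp_call_function(my_helper, NULL, 0, 1);	- wait == 1
 */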
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
/*
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	preempt_disable();
	cpu_clear(smp_processor_id(), mask);
	__smp_call_function_map(func, info, 0, wait, mask);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
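/*
 * Added note (not in the original file): a sigp order is answered with
 * condition code sigp_busy while the target cpu is still processing a
 * previous order, hence the retry loops around signal_processor()
 * throughout this file.
 */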
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
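/*
 * Worked example (added, hypothetical values): to set bit 7 of control
 * register 0 on every cpu, a caller would build
 *
 *	parms.orvals[0]  = 1 << 7;	- bits to set
 *	parms.andvals[0] = ~0UL;	- keep everything else
 *
 * so each cpu computes cregs[0] = (cregs[0] & ~0UL) | (1 << 7): the
 * and-mask clears bits, the or-mask sets them.
 */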
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
/*
 * In early ipl state a temporary logical cpu number is needed, so the sigp
 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}
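/*
 * Added note (not in the original file): the sigp variant brute-forces
 * all 65536 possible cpu addresses with sigp_sense and assigns every
 * stopped, previously unknown cpu the next free logical number.  It is
 * only used as a fallback when sclp cpu information is unavailable.
 */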
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}
static int smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
	get_online_cpus();
	smp_rescan_cpus();
	put_online_cpus();
}
/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out_save_area;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

#ifndef CONFIG_64BIT
out_save_area:
	free_page(panic_stack);
#endif
out:
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}
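/*
 * Added background (not in the original file): the prefix register
 * relocates each cpu's view of low real storage to its private lowcore,
 * giving every cpu its own architectural save areas and per-cpu entry
 * data.  The lowcore is allocated with GFP_DMA because the prefix value
 * installed via sigp_set_prefix is a 32-bit quantity (note the u32 casts
 * here and in __cpu_up() below).
 */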
#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
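/*
 * Added summary (not in the original file): bringing a cpu online means
 * (1) pointing its prefix register at the freshly allocated lowcore via
 * sigp_set_prefix, (2) hand-crafting an initial stack frame and register
 * save area for the idle task, (3) issuing sigp_restart so the cpu loads
 * its restart PSW and ends up in start_secondary(), and (4) spinning
 * until it sets its bit in cpu_online_map.
 */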
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
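/*
 * Usage example (added): booting with "possible_cpus=4" on the kernel
 * command line restricts cpu_possible_map to cpus 0-3, which bounds all
 * later hotplug and rescan activity.
 */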
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	printk(KERN_INFO "Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;
	int lc_order;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info(&S390_lowcore.cpu_data);

	/* Reallocate current lowcore, but keep its contents. */
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
				   size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&smp_cpu_state_mutex);
	get_online_cpus();
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	put_online_cpus();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
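/*
 * Usage example (added): with CONFIG_HOTPLUG_CPU this appears as
 * /sys/devices/system/cpu/cpuN/configure.  Writing 0 deconfigures an
 * offline cpu through sclp, writing 1 configures it again; cpus that are
 * online are rejected with -EBUSY.
 */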
static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
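/*
 * Added note (not in the original file): idle time is accounted in TOD
 * clock units, where bit 51 (i.e. a value of 1 << 12) corresponds to one
 * microsecond.  The ">> 12" above therefore converts the accumulated
 * value to the microseconds promised by the idle_time_us name.
 */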
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t __ref rescan_store(struct sys_device *dev,
				  const char *buf, size_t count)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	mutex_lock(&smp_cpu_state_mutex);
	get_online_cpus();
	newcpus = cpu_present_map;
	rc = smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	put_online_cpus();
	mutex_unlock(&smp_cpu_state_mutex);
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc ? rc : count;
}
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
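/*
 * Usage example (added): writing anything to the write-only attribute
 * /sys/devices/system/cpu/rescan, e.g. "echo 1 > .../rescan", probes for
 * cpus that appeared since boot and registers any new ones.
 */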
static ssize_t dispatching_show(struct sys_device *dev, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	mutex_lock(&smp_cpu_state_mutex);
	get_online_cpus();
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	put_online_cpus();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
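/*
 * Usage example (added): "echo 1 > /sys/devices/system/cpu/dispatching"
 * requests vertical cpu polarization from the hypervisor via
 * topology_set_cpu_management(), "echo 0" switches back to horizontal.
 */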
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_rescan.attr);
	if (rc)
		return rc;
#endif
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_dispatching.attr);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);