/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
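
/*
 * Illustrative note, not part of the original file: the sigp wrappers in
 * <asm/sigp.h> are where the logical-to-physical translation happens,
 * roughly:
 *
 *	u16 phys_addr = __cpu_logical_map[cpu];	 (logical -> physical)
 *
 * so everything else in this file deals in logical cpu numbers only.
 */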
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;
enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(smp_cpu_state_mutex);
#endif
static int smp_cpu_state[NR_CPUS];

static DEFINE_PER_CPU(struct cpu, cpu_devices);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on that CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
/*
 * In early ipl state a temporary logical cpu number is needed, so the sigp
 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		udelay(10);
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
/*
 * Let's check how many CPUs we have.
 */
static void __init smp_count_cpus(unsigned int *configured_cpus,
				  unsigned int *standby_cpus)
{
	unsigned int cpu;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	*configured_cpus = 1;
	*standby_cpus = 0;

	info = alloc_bootmem_pages(sizeof(*info));
	if (!info)
		disabled_wait((unsigned long) __builtin_return_address(0));

	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (cpu_stopped(CPU_INIT_NO))
				(*configured_cpus)++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}
	/* Count cpus. */
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			(*standby_cpus)++;
			continue;
		}
		smp_get_save_area(*configured_cpus, cpu_addr);
		(*configured_cpus)++;
	}
out:
	printk(KERN_INFO "CPUs: %d configured, %d standby\n",
	       *configured_cpus, *standby_cpus);
	free_bootmem((unsigned long) info, sizeof(*info));
}
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}
static int __init_refok smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	if (slab_is_available())
		info = kmalloc(sizeof(*info), GFP_KERNEL);
	else
		info = alloc_bootmem(sizeof(*info));
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	if (slab_is_available())
		kfree(info);
	else
		free_bootmem((unsigned long) info, sizeof(*info));
	return rc;
}
static int smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_setall(avail);
	cpus_and(avail, avail, cpu_possible_map);
	cpus_andnot(avail, avail, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int pos_cpus, cpu;
	unsigned int configured_cpus, standby_cpus;

	smp_count_cpus(&configured_cpus, &standby_cpus);
	pos_cpus = min(configured_cpus + standby_cpus + additional_cpus,
		       (unsigned int) NR_CPUS);
	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);
	cpu_present_map = cpumask_of_cpu(0);
	smp_rescan_cpus();
}
#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk(KERN_INFO "Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
				   size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&smp_cpu_state_mutex);
	lock_cpu_hotplug();
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc)
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc)
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
		}
		break;
	default:
		break;
	}
out:
	unlock_cpu_hotplug();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
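
/*
 * Usage sketch (illustrative): with CONFIG_HOTPLUG_CPU, an offline cpu can
 * be toggled between configured and standby from user space, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/configure
 */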
static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t rescan_store(struct sys_device *dev, const char *buf,
			    size_t count)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	mutex_lock(&smp_cpu_state_mutex);
	lock_cpu_hotplug();
	newcpus = cpu_present_map;
	rc = smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	unlock_cpu_hotplug();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
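
/*
 * Usage sketch (illustrative): writing to the rescan attribute created in
 * topology_init() below, e.g.
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 *
 * triggers smp_rescan_cpus() and registers any newly found cpus.
 */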
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_rescan.attr);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);