/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
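/*
 * Bit signals are delivered by setting a bit in the target CPU's
 * lowcore ext_call_fast word and raising the 0x1201 emergency signal
 * external interrupt (see smp_ext_bitcall() and smp_prepare_cpus()
 * below); all bits pending at interrupt time are then consumed here
 * with a single xchg().
 */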
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
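/*
 * Note: the mask pair is applied as new_cr = (old_cr & andvals[cr]) |
 * orvals[cr] on every CPU via on_each_cpu(), so a control register bit
 * is always changed machine-wide, never just on the calling CPU.
 */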
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
/*
 * In early ipl state a temporary logical cpu number is needed, so the
 * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2
 * on CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
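/*
 * sigp sense stores a status word when the condition code is
 * sigp_status_stored; bit 0x40 of that status marks the addressed
 * CPU as stopped.
 */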
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
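/*
 * Brute force variant used when no SCLP CPU info is available: probe
 * all 65536 possible physical CPU addresses, map each unknown address
 * to the next free logical CPU and keep those that sense as stopped.
 */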
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}
static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
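/*
 * Boot time CPU detection: query the CPU list from the SCLP and fall
 * back to the sigp sense scan if the SCLP interface does not work.
 */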
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}
/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
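/*
 * Each CPU gets its own lowcore (prefix) area. It must be allocated
 * below 2 GB (GFP_DMA) since the prefix is a 31-bit address; it spans
 * two pages (order 1) on 64 bit and one page on 31 bit, which is what
 * the lc_order computation below encodes.
 */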
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}
#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */
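/*
 * Bringing a secondary CPU up is a three step sigp dance: set its
 * prefix register to the new lowcore, prepare an initial kernel stack
 * frame and register save area in that lowcore, and finally restart
 * the CPU so it enters the kernel on the prepared stack.
 */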
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode)
		return -EIO;

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
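/*
 * "possible_cpus=<n>" early parameter: CPU 0 is always possible,
 * additional CPUs are capped by both <n> and NR_CPUS.
 */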
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;
	int lc_order;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info(&S390_lowcore.cpu_data);

	/* Reallocate current lowcore, but keep its contents. */
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
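/*
 * sysfs interface: "configure" switches a CPU between the configured
 * and standby state through the SCLP, "polarization" and "address"
 * report per-CPU topology information.
 */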
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
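/*
 * idle_time is accounted in TOD clock units; since TOD bit 51 ticks
 * once per microsecond, shifting right by 12 yields microseconds. An
 * idle interval still in progress is folded in before reporting.
 */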
static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
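/*
 * Hotplug notifier: create the online-only attribute group (and reset
 * the idle statistics) when a CPU comes online, remove it again when
 * the CPU dies.
 */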
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
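/*
 * Rescan for hot-added CPUs: remember the present mask, rerun the
 * detection and register sysfs entries for every CPU that appeared.
 */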
#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}
static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
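/*
 * The "dispatching" class attribute selects horizontal (0) or vertical
 * (1) CPU polarization machine-wide via topology_set_cpu_management().
 */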
static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);