2 * Copyright IBM Corp. 2007
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 #define KMSG_COMPONENT "cpu"
7 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/device.h>
13 #include <linux/bootmem.h>
14 #include <linux/sched.h>
15 #include <linux/workqueue.h>
16 #include <linux/cpu.h>
17 #include <linux/smp.h>
18 #include <linux/cpuset.h>
19 #include <asm/delay.h>
20 #include <asm/s390_ext.h>
21 #include <asm/sysinfo.h>
26 #define PTF_HORIZONTAL (0UL)
27 #define PTF_VERTICAL (1UL)
28 #define PTF_CHECK (2UL)
31 unsigned char reserved0
[4];
34 unsigned char reserved1
;
35 unsigned short origin
;
36 unsigned long mask
[CPU_BITS
/ BITS_PER_LONG
];
40 unsigned char reserved
[7];
47 struct tl_container container
;
51 unsigned char reserved0
[2];
52 unsigned short length
;
53 unsigned char mag
[NR_MAG
];
54 unsigned char reserved1
;
56 unsigned char reserved2
[4];
57 union tl_entry tle
[0];
61 struct mask_info
*next
;
66 static int topology_enabled
;
67 static void topology_work_fn(struct work_struct
*work
);
68 static struct tl_info
*tl_info
;
69 static int machine_has_topology
;
70 static struct timer_list topology_timer
;
71 static void set_topology_timer(void);
72 static DECLARE_WORK(topology_work
, topology_work_fn
);
73 /* topology_lock protects the core linked list */
74 static DEFINE_SPINLOCK(topology_lock
);
76 static struct mask_info core_info
;
77 cpumask_t cpu_core_map
[NR_CPUS
];
78 unsigned char cpu_core_id
[NR_CPUS
];
80 #ifdef CONFIG_SCHED_BOOK
81 static struct mask_info book_info
;
82 cpumask_t cpu_book_map
[NR_CPUS
];
83 unsigned char cpu_book_id
[NR_CPUS
];
86 static cpumask_t
cpu_group_map(struct mask_info
*info
, unsigned int cpu
)
91 if (!topology_enabled
|| !machine_has_topology
)
92 return cpu_possible_map
;
94 if (cpu_isset(cpu
, info
->mask
)) {
100 if (cpus_empty(mask
))
101 mask
= cpumask_of_cpu(cpu
);
105 static void add_cpus_to_mask(struct tl_cpu
*tl_cpu
, struct mask_info
*book
,
106 struct mask_info
*core
)
110 for (cpu
= find_first_bit(&tl_cpu
->mask
[0], CPU_BITS
);
112 cpu
= find_next_bit(&tl_cpu
->mask
[0], CPU_BITS
, cpu
+ 1))
114 unsigned int rcpu
, lcpu
;
116 rcpu
= CPU_BITS
- 1 - cpu
+ tl_cpu
->origin
;
117 for_each_present_cpu(lcpu
) {
118 if (cpu_logical_map(lcpu
) != rcpu
)
120 #ifdef CONFIG_SCHED_BOOK
121 cpu_set(lcpu
, book
->mask
);
122 cpu_book_id
[lcpu
] = book
->id
;
124 cpu_set(lcpu
, core
->mask
);
125 cpu_core_id
[lcpu
] = core
->id
;
126 smp_cpu_polarization
[lcpu
] = tl_cpu
->pp
;
131 static void clear_masks(void)
133 struct mask_info
*info
;
137 cpus_clear(info
->mask
);
140 #ifdef CONFIG_SCHED_BOOK
143 cpus_clear(info
->mask
);
149 static union tl_entry
*next_tle(union tl_entry
*tle
)
152 return (union tl_entry
*)((struct tl_container
*)tle
+ 1);
154 return (union tl_entry
*)((struct tl_cpu
*)tle
+ 1);
157 static void tl_to_cores(struct tl_info
*info
)
159 #ifdef CONFIG_SCHED_BOOK
160 struct mask_info
*book
= &book_info
;
162 struct mask_info
*book
= NULL
;
164 struct mask_info
*core
= &core_info
;
165 union tl_entry
*tle
, *end
;
168 spin_lock_irq(&topology_lock
);
171 end
= (union tl_entry
*)((unsigned long)info
+ info
->length
);
174 #ifdef CONFIG_SCHED_BOOK
177 book
->id
= tle
->container
.id
;
182 core
->id
= tle
->container
.id
;
185 add_cpus_to_mask(&tle
->cpu
, book
, core
);
189 machine_has_topology
= 0;
195 spin_unlock_irq(&topology_lock
);
198 static void topology_update_polarization_simple(void)
202 mutex_lock(&smp_cpu_state_mutex
);
203 for_each_possible_cpu(cpu
)
204 smp_cpu_polarization
[cpu
] = POLARIZATION_HRZ
;
205 mutex_unlock(&smp_cpu_state_mutex
);
208 static int ptf(unsigned long fc
)
213 " .insn rre,0xb9a20000,%1,%1\n"
221 int topology_set_cpu_management(int fc
)
226 if (!machine_has_topology
)
229 rc
= ptf(PTF_VERTICAL
);
231 rc
= ptf(PTF_HORIZONTAL
);
234 for_each_possible_cpu(cpu
)
235 smp_cpu_polarization
[cpu
] = POLARIZATION_UNKNWN
;
239 static void update_cpu_core_map(void)
244 spin_lock_irqsave(&topology_lock
, flags
);
245 for_each_possible_cpu(cpu
) {
246 cpu_core_map
[cpu
] = cpu_group_map(&core_info
, cpu
);
247 #ifdef CONFIG_SCHED_BOOK
248 cpu_book_map
[cpu
] = cpu_group_map(&book_info
, cpu
);
251 spin_unlock_irqrestore(&topology_lock
, flags
);
/*
 * Read the machine topology into @info via STSI 15.1.x; try the deeper
 * nesting level 3 first and fall back to level 2 when unsupported.
 */
static void store_topology(struct tl_info *info)
{
#ifdef CONFIG_SCHED_BOOK
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
#endif
	stsi(info, 15, 1, 2);
}
266 int arch_update_cpu_topology(void)
268 struct tl_info
*info
= tl_info
;
269 struct sys_device
*sysdev
;
272 if (!machine_has_topology
) {
273 update_cpu_core_map();
274 topology_update_polarization_simple();
277 store_topology(info
);
279 update_cpu_core_map();
280 for_each_online_cpu(cpu
) {
281 sysdev
= get_cpu_sysdev(cpu
);
282 kobject_uevent(&sysdev
->kobj
, KOBJ_CHANGE
);
/* Deferred work handler: rebuild the scheduler domains. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
292 void topology_schedule_update(void)
294 schedule_work(&topology_work
);
297 static void topology_timer_fn(unsigned long ignored
)
300 topology_schedule_update();
301 set_topology_timer();
304 static void set_topology_timer(void)
306 topology_timer
.function
= topology_timer_fn
;
307 topology_timer
.data
= 0;
308 topology_timer
.expires
= jiffies
+ 60 * HZ
;
309 add_timer(&topology_timer
);
312 static int __init
early_parse_topology(char *p
)
314 if (strncmp(p
, "on", 2))
316 topology_enabled
= 1;
319 early_param("topology", early_parse_topology
);
321 static int __init
init_topology_update(void)
326 if (!machine_has_topology
) {
327 topology_update_polarization_simple();
330 init_timer_deferrable(&topology_timer
);
331 set_topology_timer();
333 update_cpu_core_map();
336 __initcall(init_topology_update
);
338 static void alloc_masks(struct tl_info
*info
, struct mask_info
*mask
, int offset
)
342 nr_masks
= info
->mag
[NR_MAG
- offset
];
343 for (i
= 0; i
< info
->mnest
- offset
; i
++)
344 nr_masks
*= info
->mag
[NR_MAG
- offset
- 1 - i
];
345 nr_masks
= max(nr_masks
, 1);
346 for (i
= 0; i
< nr_masks
; i
++) {
347 mask
->next
= alloc_bootmem(sizeof(struct mask_info
));
352 void __init
s390_init_cpu_topology(void)
354 unsigned long long facility_bits
;
355 struct tl_info
*info
;
358 if (stfle(&facility_bits
, 1) <= 0)
360 if (!(facility_bits
& (1ULL << 52)) || !(facility_bits
& (1ULL << 61)))
362 machine_has_topology
= 1;
364 tl_info
= alloc_bootmem_pages(PAGE_SIZE
);
366 store_topology(info
);
367 pr_info("The CPU configuration topology of the machine is:");
368 for (i
= 0; i
< NR_MAG
; i
++)
369 printk(" %d", info
->mag
[i
]);
370 printk(" / %d\n", info
->mnest
);
371 alloc_masks(info
, &core_info
, 2);
372 #ifdef CONFIG_SCHED_BOOK
373 alloc_masks(info
, &book_info
, 3);