/* sysfs.c: Topology sysfs support code for sparc64.
 *
 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
 */
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>

#include <asm/hypervisor.h>
#include <asm/spitfire.h>

static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64)));

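/* SHOW_MMUSTAT_ULONG() expands, for one field of the per-cpu
 * hv_mmu_statistics buffer, into a sysfs show routine plus a
 * read-only sysdev attribute of the same name.
 */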
#define SHOW_MMUSTAT_ULONG(NAME) \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
	struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \
	return sprintf(buf, "%lu\n", p->NAME); \
} \
static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL)

SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte);
SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte);

static struct attribute *mmu_stat_attrs[] = {
	&attr_immu_tsb_hits_ctx0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_immu_tsb_hits_ctx0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_immu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_immu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr,
	&attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr,
	&attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr,
	NULL,
};

static struct attribute_group mmu_stat_group = {
	.attrs = mmu_stat_attrs,
	.name = "mmu_stats",
};

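/* run_on_cpu() temporarily binds the calling task to the target cpu so
 * that func() executes there, then restores the original affinity mask.
 */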
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
				unsigned long (*func)(unsigned long),
				unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}

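/* The helpers below are run on the cpu being queried via run_on_cpu().
 * sun4v_mmustat_info() reports the currently configured statistics
 * buffer, and sun4v_mmustat_conf() points the hypervisor at (or away
 * from) this cpu's per-cpu mmu_stats buffer.
 */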
static unsigned long read_mmustat_enable(unsigned long junk)
{
	unsigned long ra = 0;

	sun4v_mmustat_info(&ra);

	return ra != 0;
}

static unsigned long write_mmustat_enable(unsigned long val)
{
	unsigned long ra, orig_ra;

	if (val)
		ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
	else
		ra = 0UL;

	return sun4v_mmustat_conf(ra, &orig_ra);
}

static ssize_t show_mmustat_enable(struct sys_device *s, char *buf)
{
	unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
	return sprintf(buf, "%lx\n", val);
}

static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count)
{
	unsigned long val, err;
	int ret = sscanf(buf, "%ld", &val);

	if (ret != 1)
		return -EINVAL;

	err = run_on_cpu(s->id, write_mmustat_enable, val);
	if (err)
		return -EIO;

	return count;
}

static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable);

static int mmu_stats_supported;

static int register_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return 0;
	sysdev_create_file(s, &attr_mmustat_enable);
	return sysfs_create_group(&s->kobj, &mmu_stat_group);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_mmu_stats(struct sys_device *s)
{
	if (!mmu_stats_supported)
		return;
	sysfs_remove_group(&s->kobj, &mmu_stat_group);
	sysdev_remove_file(s, &attr_mmustat_enable);
}
#endif

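/* The two macros below export read-only fields of cpuinfo_sparc
 * (clock rate, udelay constant and cache geometry) as per-cpu sysfs
 * attributes; one expansion handles unsigned long members, the other
 * unsigned int members.
 */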
#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%lu\n", c->MEMBER); \
}

#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
	cpuinfo_sparc *c = &cpu_data(dev->id); \
	return sprintf(buf, "%u\n", c->MEMBER); \
}

SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick);
SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val);
SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size);
SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size);
SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size);
SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size);

static struct sysdev_attribute cpu_core_attrs[] = {
	_SYSDEV_ATTR(clock_tick,          0444, show_clock_tick, NULL),
	_SYSDEV_ATTR(udelay_val,          0444, show_udelay_val, NULL),
	_SYSDEV_ATTR(l1_dcache_size,      0444, show_l1_dcache_size, NULL),
	_SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL),
	_SYSDEV_ATTR(l1_icache_size,      0444, show_l1_icache_size, NULL),
	_SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL),
	_SYSDEV_ATTR(l2_cache_size,       0444, show_l2_cache_size, NULL),
	_SYSDEV_ATTR(l2_cache_line_size,  0444, show_l2_cache_line_size, NULL),
};

static DEFINE_PER_CPU(struct cpu, cpu_devices);

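/* Hook the per-cpu core attributes (and, when supported, the MMU
 * statistics group) onto a cpu's sysdev as it comes online, and tear
 * them down again when it goes offline.
 */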
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_create_file(s, &cpu_core_attrs[i]);

	register_mmu_stats(s);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int i;

	unregister_mmu_stats(s);
	for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
		sysdev_remove_file(s, &cpu_core_attrs[i]);
}
#endif

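/* CPU hotplug notifier: attach the sysfs attributes when a cpu comes
 * online and remove them when it is taken down.
 */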
static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		register_cpu_online(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};

static void __init check_mmu_stats(void)
{
	unsigned long dummy1, err;

	mmu_stats_supported = 0;

	if (tlb_type != hypervisor)
		return;

	err = sun4v_mmustat_info(&dummy1);
	if (!err)
		mmu_stats_supported = 1;
}

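/* Boot-time setup: probe for hypervisor MMU statistics support,
 * register the hotplug notifier, then create a sysdev for every
 * possible cpu and populate attributes for those already online.
 */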
static int __init topology_init(void)
{
	int cpu;

	check_mmu_stats();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}

subsys_initcall(topology_init);