cpumask: change cpumask_of_cpu_ptr to use new cpumask_of_cpu
[linux-2.6/zen-sources.git] arch/x86/kernel/cpu/intel_cacheinfo.c
blob 6b0a10b002f184406dac952f1370ac5c4eeb34e7
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
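
/*
 * The unions below mirror the EAX/EBX/ECX register layout returned by
 * CPUID leaf 4 (deterministic cache parameters), so each register can be
 * read either as a raw 32-bit value or through its bit-fields.
 */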
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
};

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};
union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};
union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* fully associative */
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
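
/*
 * Build a CPUID4-style cache description for AMD CPUs from the
 * 0x80000005 (L1) and 0x80000006 (L2/L3) extended leaves, so the rest
 * of this file can treat AMD and Intel uniformly.
 */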
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through: leaves 0 and 1 share the L1 extraction code */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
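
/*
 * Fill one _cpuid4_info leaf: use the emulation above on AMD, the real
 * CPUID leaf 4 elsewhere, and compute the total cache size from the
 * reported geometry.
 */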
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}
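
/*
 * Count the cache leaves by iterating CPUID leaf 4 until it returns a
 * NULL cache type.
 */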
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
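
/*
 * Detect this CPU's cache sizes. Prefer CPUID leaf 4; fall back to the
 * CPUID leaf 2 descriptor table for older CPUs (and for the P4 trace
 * cache). Returns the L2 size in KB and sets c->x86_cache_size.
 */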
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
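/*
 * Record which CPUs share this cache leaf: CPUs whose APIC IDs match
 * once the thread-sharing bits are shifted out share the cache, so mark
 * them in each other's shared_cpu_map.
 */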
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
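
/*
 * Drop this CPU's cached leaves and unhook it from its siblings'
 * shared_cpu_map before freeing the per-CPU array.
 */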
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}
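
/*
 * Allocate and populate this CPU's _cpuid4_info array. The CPUID calls
 * must run on the target CPU, so temporarily bind the current task to
 * it with set_cpus_allowed_ptr() and restore the old mask afterwards.
 */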
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed_ptr(current, &oldmask);

out:
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(index_kobject, x))[y]))
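
/*
 * CPUID leaf 4 encodes line size, partitions, ways and sets as value-1,
 * so those attributes are printed with an offset of 1; "level" is shown
 * as reported (offset 0).
 */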
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
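
/*
 * Print shared_cpu_map either as a hex CPU mask (type == 0) or as a CPU
 * list (type == 1), e.g. "00000003" vs. "0-1", terminated by a newline.
 */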
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		cpumask_t *mask = &this_leaf->shared_cpu_map;

		n = type ?
			cpulist_scnprintf(buf, len-2, *mask) :
			cpumask_scnprintf(buf, len-2, *mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
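
/*
 * Generic sysfs dispatch: look up the _cache_attr for the attribute and
 * the _cpuid4_info leaf for the kobject, then call the attribute's
 * show() handler. All attributes here are read-only, so store() is a
 * no-op.
 */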
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
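
/*
 * Allocate (init) or free (exit) the per-CPU cache kobject and the
 * per-leaf index kobjects, together with the cached attribute data.
 */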
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
		sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
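
/*
 * CPU hotplug callback: create the sysfs cache interface when a CPU
 * comes online and tear it down when the CPU dies.
 */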
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};
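
/*
 * Register the cache interface for all CPUs that are already online and
 * install the hotplug notifier for CPUs that come up later.
 */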
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif