/* arch/x86/kernel/cpu/intel_cacheinfo.c */
/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
/* All the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
};

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};
union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};
union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* fully associative */
};
static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
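
/*
 * Build a fake CPUID(4) leaf from the AMD L1/L2/L3 CPUID leaves
 * (0x80000005 and 0x80000006).  Leaf index: 0 = L1 data, 1 = L1
 * instruction, 2 = L2, 3 = L3 (see levels[] and types[] above).
 */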
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
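
/*
 * Fill in one cache leaf, either from the real CPUID(4) or from the
 * AMD emulation above, and compute the total cache size in bytes.
 */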
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}
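
/*
 * Count the CPUID(4) sub-leaves by iterating until a CACHE_TYPE_NULL
 * leaf is returned.
 */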
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
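
/*
 * Detect the cache sizes for a CPU: prefer the deterministic CPUID(4)
 * data and fall back to the legacy CPUID(2) descriptors (still needed
 * for the P4 trace cache).  Also derives cpu_llc_id for the last-level
 * cache, fills in c->x86_cache_size and returns the L2 size in KB.
 */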
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;
			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}
	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
#ifdef CONFIG_SMP
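/*
 * Record which CPUs share this cache leaf: any online CPU whose APIC ID
 * matches in the bits above the num_threads_sharing field shares the
 * cache, and the sibling's map is updated as well.
 * cache_remove_shared_cpu_map() undoes this when a CPU goes away.
 */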
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
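
/* Drop this CPU from all sibling maps and free its cache leaf array. */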
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}
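
/*
 * Allocate and fill the per-CPU array of cache leaves.  CPUID must run
 * on the target CPU, so the current task is temporarily bound to it
 * with set_cpus_allowed_ptr().
 */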
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;
	cpumask_of_cpu_ptr(newmask, cpu);

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, newmask);
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed_ptr(current, &oldmask);

out:
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
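
/*
 * Generate the sysfs show routines; 'val' compensates for fields that
 * the hardware reports as value-minus-one (0 for fields reported as-is).
 */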
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
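
/*
 * Print shared_cpu_map either as a hex mask (type == 0) or as a CPU
 * list (type != 0), leaving room for the trailing newline and NUL.
 */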
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		cpumask_t *mask = &this_leaf->shared_cpu_map;

		n = type ?
			cpulist_scnprintf(buf, len-2, *mask) :
			cpumask_scnprintf(buf, len-2, *mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
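
/*
 * sysfs show/store entry points: map the kobject and attribute back to
 * the cache leaf and dispatch to the per-attribute show handler (all
 * attributes here are read-only, so store() does nothing).
 */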
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	return 0;
}
static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
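
/* Free the kobjects and cached CPUID(4) data for one CPU. */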
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static cpumask_t cache_dev_map = CPU_MASK_NONE;
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
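
/*
 * CPU hotplug callback: create the cache sysfs entries when a CPU comes
 * online and tear them down when it goes away.
 */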
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
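
/*
 * Register cache sysfs entries for all CPUs already online at boot and
 * hook the CPU hotplug notifier for CPUs that come up later.
 */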
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}
device_initcall(cache_sysfs_init);

#endif