x86: convert cpu_llc_id to be a per cpu variable
arch/x86/kernel/cpu/intel_cacheinfo.c

/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};
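/*
 * Illustrative decode (example value, not taken from any particular CPU):
 * a raw EAX of 0x121 splits into type = 1 (data), level = 1 and
 * is_self_initializing = 1, i.e. a self-initializing L1 data cache used
 * by a single thread (num_threads_sharing is reported minus one, so a
 * value of 0 means one thread).
 */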
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	cpumask_t shared_cpu_map;
};

unsigned short			num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
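/*
 * Example encoding (illustrative values): a 64K, 2-way L1 data cache with
 * one line per tag and 64 byte lines comes back from cpuid(0x80000005) as
 * ECX = 0x40020140, which the l1_cache union below splits into
 * size_in_kb = 0x40, assoc = 0x02, lines_per_tag = 0x01, line_size = 0x40.
 */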
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};
union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};
union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff // ??
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
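/*
 * assocs[] maps AMD's 4-bit associativity encoding to an actual way count
 * (0xf means fully associative); levels[] and types[] map the emulated
 * leaf index (0 = L1D, 1 = L1I, 2 = L2, 3 = L3) to the CPUID4-style cache
 * level and type values filled in below.
 */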
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		       union _cpuid4_leaf_ebx *ebx,
		       union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through: the rest of the L1 decode is common */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
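/*
 * Worked example with assumed numbers: a 512K, 16-way cache with 64 byte
 * lines has 512 * 1024 / 64 / 16 = 512 sets, stored above as
 * number_of_sets = 511, since CPUID4 reports all these fields minus one.
 */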
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		amd_cpuid4(index, &eax, &ebx, &ecx);
	else
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
	return 0;
}
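/*
 * Continuing the example above (values assumed): 512 sets * 64 byte lines
 * * 1 partition * 16 ways = 524288 bytes, so this_leaf->size reads 512K.
 */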
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
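/*
 * For example (hypothetical part): a CPU enumerating L1I, L1D and a
 * unified L2 returns CACHE_TYPE_NULL at subleaf 3, so this yields 3.
 */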
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			int retval;

			retval = cpuid4_cache_lookup(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
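	/*
	 * In the level 2/3 cases above (assumed topology): with two threads
	 * sharing the cache, num_threads_sharing = 2, so index_msb = 1 and
	 * both siblings derive the same l2_id/l3_id from apicid >> 1.
	 */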
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;
		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
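	/*
	 * Example (descriptor value from the table above): if cpuid(2)
	 * returns a byte of 0x43 in one of its registers, the lookup adds
	 * 512K to the running l2 total.
	 */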
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}
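	/*
	 * Note: when both L2 and L3 are present, the L3 assignment above
	 * wins, so the per-cpu cpu_llc_id really does track the last level
	 * cache -- the point of making it a per cpu variable.
	 */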
	if (trace)
		printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if ( l1i )
		printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = cpu_data;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (c[i].apicid >> index_msb ==
			    c[cpu].apicid >> index_msb) {
				cpu_set(i, this_leaf->shared_cpu_map);
				if (i != cpu && cpuid4_info[i])  {
					sibling_leaf = CPUID4_INFO_IDX(i, index);
					cpu_set(cpu, sibling_leaf->shared_cpu_map);
				}
			}
		}
	}
}
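/*
 * In cache_shared_cpu_map_setup() above, CPUs whose APIC IDs agree above
 * index_msb share the cache; the sibling's leaf is updated symmetrically
 * so CPUs brought online later still land in each other's shared_cpu_map.
 */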
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
static void free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	struct _cpuid4_info	*this_leaf;
	unsigned long		j;
	int			retval;
	cpumask_t		oldmask;

	if (num_cache_leaves == 0)
		return -ENOENT;

	cpuid4_info[cpu] = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (cpuid4_info[cpu] == NULL)
		return -ENOMEM;

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (retval)
		goto out;

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
	set_cpus_allowed(current, oldmask);

out:
	if (retval) {
		kfree(cpuid4_info[cpu]);
		cpuid4_info[cpu] = NULL;
	}

	return retval;
}
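/*
 * detect_cache_attributes() temporarily pins the calling task to the
 * target CPU because the cpuid instructions must execute on that CPU;
 * the previous cpus_allowed mask is restored once the leaves are read.
 */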
#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject * cache_kobject[NR_CPUS];
struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x,y)    (&((index_kobject[x])[y]))
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
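/*
 * The "+ val" above undoes CPUID4's minus-one encoding: line size,
 * partitions, ways and sets are all reported by the hardware as
 * (value - 1), while the cache level is reported as-is (hence val = 0).
 */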
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf (buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
	char mask_str[NR_CPUS];
	cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
	return sprintf(buf, "%s\n", mask_str);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
static struct attribute * default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	NULL
};
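/*
 * These attributes surface as files under
 * /sys/devices/system/cpu/cpuX/cache/indexY/; e.g. reading
 * cpu0/cache/index0/shared_cpu_map prints the mask built above.
 */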
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}
static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)
{
	return 0;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(cache_kobject[cpu]);
	kfree(index_kobject[cpu]);
	cache_kobject[cpu] = NULL;
	index_kobject[cpu] = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(cache_kobject[cpu] == NULL))
		goto err_out;

	index_kobject[cpu] = kzalloc(
	    sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(index_kobject[cpu] == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	cache_kobject[cpu]->parent = &sys_dev->kobj;
	kobject_set_name(cache_kobject[cpu], "%s", "cache");
	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
	retval = kobject_register(cache_kobject[cpu]);
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu,i);
		this_object->cpu = cpu;
		this_object->index = i;
		this_object->kobj.parent = cache_kobject[cpu];
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &ktype_cache;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(INDEX_KOBJECT_PTR(cpu,j)->kobj));
			}
			kobject_unregister(cache_kobject[cpu]);
			cpuid4_cache_sysfs_exit(cpu);
			break;
		}
	}
	if (!retval)
		cpu_set(cpu, cache_dev_map);

	return retval;
}
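/*
 * cache_dev_map records the CPUs whose sysfs entries were fully created,
 * so cache_remove_dev() below only tears down interfaces that
 * cache_add_dev() actually finished setting up.
 */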
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (cpuid4_info[cpu] == NULL)
		return;
	if (!cpu_isset(cpu, cache_dev_map))
		return;
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
	kobject_unregister(cache_kobject[cpu]);
	cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);
#endif