/*
 *	arch/x86/kernel/cpu/intel_cacheinfo.c
 *
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type
{
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};
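
/*
 * PCI device IDs of the AMD K8 and Family 10h northbridges;
 * get_k8_northbridge() below scans the PCI bus against this table to find
 * the node's northbridge for the L3 cache-index-disable interface.
 */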
#ifdef CONFIG_PCI
static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
	{}
};
#endif

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 8;
		unsigned assoc : 8;
		unsigned size_in_kb : 8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned size_in_kb : 16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size : 8;
		unsigned lines_per_tag : 4;
		unsigned assoc : 4;
		unsigned res : 2;
		unsigned size_encoded : 14;
	};
	unsigned val;
};
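
/*
 * AMD reports associativity as a 4-bit code; assocs[] maps that code to the
 * actual number of ways (0xf means fully associative).  levels[] and types[]
 * translate the emulated leaf number into the CPUID(4) level/type fields.
 */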
static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* fully associative - no way to show this currently */
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
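
/*
 * On family 0x10 the northbridge allows disabling individual L3 cache
 * indices.  Flag the L3 leaf (index 3 and up) so the cache_disable sysfs
 * attribute below becomes functional for it.
 */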
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;
	this_leaf->can_disable = 1;
}
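
/*
 * Fill one _cpuid4_info_regs leaf, either from the real CPUID(4) leaf or
 * from the AMD emulation above.  The total size is the product of the four
 * CPUID(4) fields, each of which is stored as (value - 1).
 */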
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
			  (ebx.split.coherency_line_size + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity + 1);
	return 0;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
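
/*
 * Gather cache sizes for this CPU.  CPUID(4) is used when available; the
 * legacy CPUID(2) descriptor table is only consulted when CPUID(4) is
 * missing, or (on P4) to pick up the trace cache size.  Returns the L2
 * size in KB.
 */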
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
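/*
 * CPUs share a given cache level when their APIC IDs match once the low
 * get_count_order(num_threads_sharing) bits are shifted off.  Record the
 * sharers in this leaf's shared_cpu_map and mirror this CPU into the
 * corresponding leaf of every sibling that is already initialized.
 */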
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}
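
/*
 * get_cpu_leaves() runs on the target CPU (via smp_call_function_single()
 * in detect_cache_attributes()) so the CPUID reads are executed locally.
 * On failure the shared-CPU maps built so far are torn down again.
 */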
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
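
/*
 * CPUID(4) stores line size, partitions, ways and sets as (value - 1), so
 * most of the show_* helpers below add 1 before printing; the level field
 * is reported as-is.
 */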
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
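
/*
 * shared_cpu_map is exported twice: as a hex cpumask (shared_cpu_map) and
 * as a human-readable list (shared_cpu_list); 'type' selects the format,
 * clamped to the sysfs PAGE_SIZE buffer.
 */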
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
	struct pci_dev *dev = NULL;
	int i;

	for (i = 0; i <= node; i++) {
		do {
			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
			if (!dev)
				break;
		} while (!pci_match_id(&k8_nb_id[0], dev));
		if (!dev)
			break;
	}
	return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
	return NULL;
}
#endif
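
/*
 * The two L3 index-disable slots live at PCI config offsets 0x1BC and
 * 0x1C0 (0x1BC + slot * 4) in the node's northbridge.  Bit 31 of each
 * register reports whether reads are disabled and bit 30 whether new-entry
 * allocation is disabled for the selected index.  store_cache_disable()
 * accepts "<slot> <value>" in hex and rewrites the chosen slot around a
 * wbinvd().
 */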
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	ssize_t ret = 0;
	int i;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	for (i = 0; i < 2; i++) {
		unsigned int reg;

		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
		ret += sprintf(buf, "%sReads:  %s\tNew Entries: %s\n",
			buf,
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
			buf, (reg & 0x30000) >> 16, reg & 0xfff);
	}
	return ret;
}

static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
		    size_t count)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)
		return 0;

	if (strlen(buf) > 15)
		return -EINVAL;

	ret = sscanf(buf, "%x %x", &index, &val);
	if (ret != 2)
		return -EINVAL;
	if (index > 1)
		return -EINVAL;

	val |= 0xc0000000;
	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

	return 1;
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);

static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable.attr,
	NULL
};
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
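/*
 * cache_add_dev() creates /sys/devices/system/cpu/cpuN/cache/indexM/ with
 * the attributes defined above; on any failure everything allocated so far
 * for this CPU is released again.
 */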
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
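
/*
 * CPU hotplug notifier: create the cache sysfs entries when a CPU comes
 * online and tear them down again when it goes away.
 */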
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif