Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
[linux-2.6/suspend2-2.6.18.git] / arch / ia64 / kernel / palinfo.c
blob0b546e2b36ac61c113b73ada6e98775a30294e25
/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001	S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 */
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/proc_fs.h>
24 #include <linux/mm.h>
25 #include <linux/module.h>
26 #include <linux/efi.h>
27 #include <linux/notifier.h>
28 #include <linux/cpu.h>
29 #include <linux/cpumask.h>
31 #include <asm/pal.h>
32 #include <asm/sal.h>
33 #include <asm/page.h>
34 #include <asm/processor.h>
35 #include <linux/smp.h>
37 MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
38 MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
39 MODULE_LICENSE("GPL");
41 #define PALINFO_VERSION "0.5"
/* Signature of a handler that formats one /proc/pal file into "page"
 * and returns the number of bytes written. */
typedef int (*palinfo_func_t)(char*);

/* Descriptor for one file under /proc/pal/cpuN */
typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;
/*
 * A bunch of string arrays to get pretty printing
 */
static char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};
113 * Take a 64bit vector and produces a string such that
114 * if bit n is set then 2^n in clear text is generated. The adjustment
115 * to the right unit is also done.
117 * Input:
118 * - a pointer to a buffer to hold the string
119 * - a 64-bit vector
120 * Ouput:
121 * - a pointer to the end of the buffer
124 static char *
125 bitvector_process(char *p, u64 vector)
127 int i,j;
128 const char *units[]={ "", "K", "M", "G", "T" };
130 for (i=0, j=0; i < 64; i++ , j=i/10) {
131 if (vector & 0x1) {
132 p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
134 vector >>= 1;
136 return p;
140 * Take a 64bit vector and produces a string such that
141 * if bit n is set then register n is present. The function
142 * takes into account consecutive registers and prints out ranges.
144 * Input:
145 * - a pointer to a buffer to hold the string
146 * - a 64-bit vector
147 * Ouput:
148 * - a pointer to the end of the buffer
151 static char *
152 bitregister_process(char *p, u64 *reg_info, int max)
154 int i, begin, skip = 0;
155 u64 value = reg_info[0];
157 value >>= i = begin = ffs(value) - 1;
159 for(; i < max; i++ ) {
161 if (i != 0 && (i%64) == 0) value = *++reg_info;
163 if ((value & 0x1) == 0 && skip == 0) {
164 if (begin <= i - 2)
165 p += sprintf(p, "%d-%d ", begin, i-1);
166 else
167 p += sprintf(p, "%d ", i-1);
168 skip = 1;
169 begin = -1;
170 } else if ((value & 0x1) && skip == 1) {
171 skip = 0;
172 begin = i;
174 value >>=1;
176 if (begin > -1) {
177 if (begin < 127)
178 p += sprintf(p, "%d-127", begin);
179 else
180 p += sprintf(p, "127");
183 return p;
186 static int
187 power_info(char *page)
189 s64 status;
190 char *p = page;
191 u64 halt_info_buffer[8];
192 pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
193 int i;
195 status = ia64_pal_halt_info(halt_info);
196 if (status != 0) return 0;
198 for (i=0; i < 8 ; i++ ) {
199 if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
200 p += sprintf(p, "Power level %d:\n"
201 "\tentry_latency : %d cycles\n"
202 "\texit_latency : %d cycles\n"
203 "\tpower consumption : %d mW\n"
204 "\tCache+TLB coherency : %s\n", i,
205 halt_info[i].pal_power_mgmt_info_s.entry_latency,
206 halt_info[i].pal_power_mgmt_info_s.exit_latency,
207 halt_info[i].pal_power_mgmt_info_s.power_consumption,
208 halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
209 } else {
210 p += sprintf(p,"Power level %d: not implemented\n",i);
213 return p - page;
216 static int
217 cache_info(char *page)
219 char *p = page;
220 u64 i, levels, unique_caches;
221 pal_cache_config_info_t cci;
222 int j, k;
223 s64 status;
225 if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
226 printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
227 return 0;
230 p += sprintf(p, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches);
232 for (i=0; i < levels; i++) {
234 for (j=2; j >0 ; j--) {
236 /* even without unification some level may not be present */
237 if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
238 continue;
240 p += sprintf(p,
241 "%s Cache level %lu:\n"
242 "\tSize : %u bytes\n"
243 "\tAttributes : ",
244 cache_types[j+cci.pcci_unified], i+1,
245 cci.pcci_cache_size);
247 if (cci.pcci_unified) p += sprintf(p, "Unified ");
249 p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);
251 p += sprintf(p,
252 "\tAssociativity : %d\n"
253 "\tLine size : %d bytes\n"
254 "\tStride : %d bytes\n",
255 cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
256 if (j == 1)
257 p += sprintf(p, "\tStore latency : N/A\n");
258 else
259 p += sprintf(p, "\tStore latency : %d cycle(s)\n",
260 cci.pcci_st_latency);
262 p += sprintf(p,
263 "\tLoad latency : %d cycle(s)\n"
264 "\tStore hints : ", cci.pcci_ld_latency);
266 for(k=0; k < 8; k++ ) {
267 if ( cci.pcci_st_hints & 0x1)
268 p += sprintf(p, "[%s]", cache_st_hints[k]);
269 cci.pcci_st_hints >>=1;
271 p += sprintf(p, "\n\tLoad hints : ");
273 for(k=0; k < 8; k++ ) {
274 if (cci.pcci_ld_hints & 0x1)
275 p += sprintf(p, "[%s]", cache_ld_hints[k]);
276 cci.pcci_ld_hints >>=1;
278 p += sprintf(p,
279 "\n\tAlias boundary : %d byte(s)\n"
280 "\tTag LSB : %d\n"
281 "\tTag MSB : %d\n",
282 1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
283 cci.pcci_tag_msb);
285 /* when unified, data(j=2) is enough */
286 if (cci.pcci_unified) break;
289 return p - page;
293 static int
294 vm_info(char *page)
296 char *p = page;
297 u64 tr_pages =0, vw_pages=0, tc_pages;
298 u64 attrib;
299 pal_vm_info_1_u_t vm_info_1;
300 pal_vm_info_2_u_t vm_info_2;
301 pal_tc_info_u_t tc_info;
302 ia64_ptce_info_t ptce;
303 const char *sep;
304 int i, j;
305 s64 status;
307 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
308 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
309 } else {
311 p += sprintf(p,
312 "Physical Address Space : %d bits\n"
313 "Virtual Address Space : %d bits\n"
314 "Protection Key Registers(PKR) : %d\n"
315 "Implemented bits in PKR.key : %d\n"
316 "Hash Tag ID : 0x%x\n"
317 "Size of RR.rid : %d\n",
318 vm_info_1.pal_vm_info_1_s.phys_add_size,
319 vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
320 vm_info_1.pal_vm_info_1_s.max_pkr+1,
321 vm_info_1.pal_vm_info_1_s.key_size,
322 vm_info_1.pal_vm_info_1_s.hash_tag_id,
323 vm_info_2.pal_vm_info_2_s.rid_size);
326 if (ia64_pal_mem_attrib(&attrib) == 0) {
327 p += sprintf(p, "Supported memory attributes : ");
328 sep = "";
329 for (i = 0; i < 8; i++) {
330 if (attrib & (1 << i)) {
331 p += sprintf(p, "%s%s", sep, mem_attrib[i]);
332 sep = ", ";
335 p += sprintf(p, "\n");
338 if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
339 printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
340 } else {
342 p += sprintf(p,
343 "\nTLB walker : %simplemented\n"
344 "Number of DTR : %d\n"
345 "Number of ITR : %d\n"
346 "TLB insertable page sizes : ",
347 vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
348 vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
349 vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
352 p = bitvector_process(p, tr_pages);
354 p += sprintf(p, "\nTLB purgeable page sizes : ");
356 p = bitvector_process(p, vw_pages);
358 if ((status=ia64_get_ptce(&ptce)) != 0) {
359 printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
360 } else {
361 p += sprintf(p,
362 "\nPurge base address : 0x%016lx\n"
363 "Purge outer loop count : %d\n"
364 "Purge inner loop count : %d\n"
365 "Purge outer loop stride : %d\n"
366 "Purge inner loop stride : %d\n",
367 ptce.base, ptce.count[0], ptce.count[1],
368 ptce.stride[0], ptce.stride[1]);
370 p += sprintf(p,
371 "TC Levels : %d\n"
372 "Unique TC(s) : %d\n",
373 vm_info_1.pal_vm_info_1_s.num_tc_levels,
374 vm_info_1.pal_vm_info_1_s.max_unique_tcs);
376 for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
377 for (j=2; j>0 ; j--) {
378 tc_pages = 0; /* just in case */
381 /* even without unification, some levels may not be present */
382 if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
383 continue;
386 p += sprintf(p,
387 "\n%s Translation Cache Level %d:\n"
388 "\tHash sets : %d\n"
389 "\tAssociativity : %d\n"
390 "\tNumber of entries : %d\n"
391 "\tFlags : ",
392 cache_types[j+tc_info.tc_unified], i+1,
393 tc_info.tc_num_sets,
394 tc_info.tc_associativity,
395 tc_info.tc_num_entries);
397 if (tc_info.tc_pf)
398 p += sprintf(p, "PreferredPageSizeOptimized ");
399 if (tc_info.tc_unified)
400 p += sprintf(p, "Unified ");
401 if (tc_info.tc_reduce_tr)
402 p += sprintf(p, "TCReduction");
404 p += sprintf(p, "\n\tSupported page sizes: ");
406 p = bitvector_process(p, tc_pages);
408 /* when unified date (j=2) is enough */
409 if (tc_info.tc_unified)
410 break;
414 p += sprintf(p, "\n");
416 return p - page;
420 static int
421 register_info(char *page)
423 char *p = page;
424 u64 reg_info[2];
425 u64 info;
426 u64 phys_stacked;
427 pal_hints_u_t hints;
428 u64 iregs, dregs;
429 char *info_type[]={
430 "Implemented AR(s)",
431 "AR(s) with read side-effects",
432 "Implemented CR(s)",
433 "CR(s) with read side-effects",
436 for(info=0; info < 4; info++) {
438 if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;
440 p += sprintf(p, "%-32s : ", info_type[info]);
442 p = bitregister_process(p, reg_info, 128);
444 p += sprintf(p, "\n");
447 if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
449 p += sprintf(p,
450 "RSE stacked physical registers : %ld\n"
451 "RSE load/store hints : %ld (%s)\n",
452 phys_stacked, hints.ph_data,
453 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
455 if (ia64_pal_debug_info(&iregs, &dregs))
456 return 0;
458 p += sprintf(p,
459 "Instruction debug register pairs : %ld\n"
460 "Data debug register pairs : %ld\n", iregs, dregs);
462 return p - page;
465 static const char *proc_features[]={
466 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
467 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
468 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
469 NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
470 NULL,NULL,NULL,NULL,NULL,
471 "XIP,XPSR,XFS implemented",
472 "XR1-XR3 implemented",
473 "Disable dynamic predicate prediction",
474 "Disable processor physical number",
475 "Disable dynamic data cache prefetch",
476 "Disable dynamic inst cache prefetch",
477 "Disable dynamic branch prediction",
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 "Disable BINIT on processor time-out",
480 "Disable dynamic power management (DPM)",
481 "Disable coherency",
482 "Disable cache",
483 "Enable CMCI promotion",
484 "Enable MCA to BINIT promotion",
485 "Enable MCA promotion",
486 "Enable BERR promotion"
490 static int
491 processor_info(char *page)
493 char *p = page;
494 const char **v = proc_features;
495 u64 avail=1, status=1, control=1;
496 int i;
497 s64 ret;
499 if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;
501 for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
502 if ( ! *v ) continue;
503 p += sprintf(p, "%-40s : %s%s %s\n", *v,
504 avail & 0x1 ? "" : "NotImpl",
505 avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
506 avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
508 return p - page;
511 static const char *bus_features[]={
512 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
513 NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
514 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
515 NULL,NULL,
516 "Request Bus Parking",
517 "Bus Lock Mask",
518 "Enable Half Transfer",
519 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
520 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
521 NULL, NULL, NULL, NULL,
522 "Enable Cache Line Repl. Shared",
523 "Enable Cache Line Repl. Exclusive",
524 "Disable Transaction Queuing",
525 "Disable Response Error Checking",
526 "Disable Bus Error Checking",
527 "Disable Bus Requester Internal Error Signalling",
528 "Disable Bus Requester Error Signalling",
529 "Disable Bus Initialization Event Checking",
530 "Disable Bus Initialization Event Signalling",
531 "Disable Bus Address Error Checking",
532 "Disable Bus Address Error Signalling",
533 "Disable Bus Data Error Checking"
537 static int
538 bus_info(char *page)
540 char *p = page;
541 const char **v = bus_features;
542 pal_bus_features_u_t av, st, ct;
543 u64 avail, status, control;
544 int i;
545 s64 ret;
547 if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;
549 avail = av.pal_bus_features_val;
550 status = st.pal_bus_features_val;
551 control = ct.pal_bus_features_val;
553 for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
554 if ( ! *v ) continue;
555 p += sprintf(p, "%-48s : %s%s %s\n", *v,
556 avail & 0x1 ? "" : "NotImpl",
557 avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
558 avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
560 return p - page;
563 static int
564 version_info(char *page)
566 pal_version_u_t min_ver, cur_ver;
567 char *p = page;
569 if (ia64_pal_version(&min_ver, &cur_ver) != 0)
570 return 0;
572 p += sprintf(p,
573 "PAL_vendor : 0x%02x (min=0x%02x)\n"
574 "PAL_A : %02x.%02x (min=%02x.%02x)\n"
575 "PAL_B : %02x.%02x (min=%02x.%02x)\n",
576 cur_ver.pal_version_s.pv_pal_vendor,
577 min_ver.pal_version_s.pv_pal_vendor,
578 cur_ver.pal_version_s.pv_pal_a_model,
579 cur_ver.pal_version_s.pv_pal_a_rev,
580 min_ver.pal_version_s.pv_pal_a_model,
581 min_ver.pal_version_s.pv_pal_a_rev,
582 cur_ver.pal_version_s.pv_pal_b_model,
583 cur_ver.pal_version_s.pv_pal_b_rev,
584 min_ver.pal_version_s.pv_pal_b_model,
585 min_ver.pal_version_s.pv_pal_b_rev);
586 return p - page;
589 static int
590 perfmon_info(char *page)
592 char *p = page;
593 u64 pm_buffer[16];
594 pal_perf_mon_info_u_t pm_info;
596 if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;
598 p += sprintf(p,
599 "PMC/PMD pairs : %d\n"
600 "Counter width : %d bits\n"
601 "Cycle event number : %d\n"
602 "Retired event number : %d\n"
603 "Implemented PMC : ",
604 pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
605 pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);
607 p = bitregister_process(p, pm_buffer, 256);
608 p += sprintf(p, "\nImplemented PMD : ");
609 p = bitregister_process(p, pm_buffer+4, 256);
610 p += sprintf(p, "\nCycles count capable : ");
611 p = bitregister_process(p, pm_buffer+8, 256);
612 p += sprintf(p, "\nRetired bundles count capable : ");
614 #ifdef CONFIG_ITANIUM
616 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
617 * which is wrong, both PMC4 and PMD5 support it.
619 if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
620 #endif
622 p = bitregister_process(p, pm_buffer+12, 256);
624 p += sprintf(p, "\n");
626 return p - page;
629 static int
630 frequency_info(char *page)
632 char *p = page;
633 struct pal_freq_ratio proc, itc, bus;
634 u64 base;
636 if (ia64_pal_freq_base(&base) == -1)
637 p += sprintf(p, "Output clock : not implemented\n");
638 else
639 p += sprintf(p, "Output clock : %ld ticks/s\n", base);
641 if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
643 p += sprintf(p,
644 "Processor/Clock ratio : %d/%d\n"
645 "Bus/Clock ratio : %d/%d\n"
646 "ITC/Clock ratio : %d/%d\n",
647 proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
649 return p - page;
652 static int
653 tr_info(char *page)
655 char *p = page;
656 s64 status;
657 pal_tr_valid_u_t tr_valid;
658 u64 tr_buffer[4];
659 pal_vm_info_1_u_t vm_info_1;
660 pal_vm_info_2_u_t vm_info_2;
661 u64 i, j;
662 u64 max[3], pgm;
663 struct ifa_reg {
664 u64 valid:1;
665 u64 ig:11;
666 u64 vpn:52;
667 } *ifa_reg;
668 struct itir_reg {
669 u64 rv1:2;
670 u64 ps:6;
671 u64 key:24;
672 u64 rv2:32;
673 } *itir_reg;
674 struct gr_reg {
675 u64 p:1;
676 u64 rv1:1;
677 u64 ma:3;
678 u64 a:1;
679 u64 d:1;
680 u64 pl:2;
681 u64 ar:3;
682 u64 ppn:38;
683 u64 rv2:2;
684 u64 ed:1;
685 u64 ig:11;
686 } *gr_reg;
687 struct rid_reg {
688 u64 ig1:1;
689 u64 rv1:1;
690 u64 ig2:6;
691 u64 rid:24;
692 u64 rv2:32;
693 } *rid_reg;
695 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
696 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
697 return 0;
699 max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
700 max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
702 for (i=0; i < 2; i++ ) {
703 for (j=0; j < max[i]; j++) {
705 status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
706 if (status != 0) {
707 printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
708 i, j, status);
709 continue;
712 ifa_reg = (struct ifa_reg *)&tr_buffer[2];
714 if (ifa_reg->valid == 0) continue;
716 gr_reg = (struct gr_reg *)tr_buffer;
717 itir_reg = (struct itir_reg *)&tr_buffer[1];
718 rid_reg = (struct rid_reg *)&tr_buffer[3];
720 pgm = -1 << (itir_reg->ps - 12);
721 p += sprintf(p,
722 "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
723 "\tppn : 0x%lx\n"
724 "\tvpn : 0x%lx\n"
725 "\tps : ",
726 "ID"[i], j,
727 tr_valid.pal_tr_valid_s.access_rights_valid,
728 tr_valid.pal_tr_valid_s.priv_level_valid,
729 tr_valid.pal_tr_valid_s.dirty_bit_valid,
730 tr_valid.pal_tr_valid_s.mem_attr_valid,
731 (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);
733 p = bitvector_process(p, 1<< itir_reg->ps);
735 p += sprintf(p,
736 "\n\tpl : %d\n"
737 "\tar : %d\n"
738 "\trid : %x\n"
739 "\tp : %d\n"
740 "\tma : %d\n"
741 "\td : %d\n",
742 gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
743 gr_reg->d);
746 return p - page;
752 * List {name,function} pairs for every entry in /proc/palinfo/cpu*
754 static palinfo_entry_t palinfo_entries[]={
755 { "version_info", version_info, },
756 { "vm_info", vm_info, },
757 { "cache_info", cache_info, },
758 { "power_info", power_info, },
759 { "register_info", register_info, },
760 { "processor_info", processor_info, },
761 { "perfmon_info", perfmon_info, },
762 { "frequency_info", frequency_info, },
763 { "bus_info", bus_info },
764 { "tr_info", tr_info, }
767 #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
770 * this array is used to keep track of the proc entries we create. This is
771 * required in the module mode when we need to remove all entries. The procfs code
772 * does not do recursion of deletion
774 * Notes:
775 * - +1 accounts for the cpuN directory entry in /proc/pal
777 #define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
779 static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
780 static struct proc_dir_entry *palinfo_dir;
783 * This data structure is used to pass which cpu,function is being requested
784 * It must fit in a 64bit quantity to be passed to the proc callback routine
786 * In SMP mode, when we get a request for another CPU, we must call that
787 * other CPU using IPI and wait for the result before returning.
789 typedef union {
790 u64 value;
791 struct {
792 unsigned req_cpu: 32; /* for which CPU this info is */
793 unsigned func_id: 32; /* which function is requested */
794 } pal_func_cpu;
795 } pal_func_cpu_u_t;
797 #define req_cpu pal_func_cpu.req_cpu
798 #define func_id pal_func_cpu.func_id
800 #ifdef CONFIG_SMP
803 * used to hold information about final function to call
805 typedef struct {
806 palinfo_func_t func; /* pointer to function to call */
807 char *page; /* buffer to store results */
808 int ret; /* return value from call */
809 } palinfo_smp_data_t;
813 * this function does the actual final call and he called
814 * from the smp code, i.e., this is the palinfo callback routine
816 static void
817 palinfo_smp_call(void *info)
819 palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
820 if (data == NULL) {
821 printk(KERN_ERR "palinfo: data pointer is NULL\n");
822 data->ret = 0; /* no output */
823 return;
825 /* does this actual call */
826 data->ret = (*data->func)(data->page);
830 * function called to trigger the IPI, we need to access a remote CPU
831 * Return:
832 * 0 : error or nothing to output
833 * otherwise how many bytes in the "page" buffer were written
835 static
836 int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
838 palinfo_smp_data_t ptr;
839 int ret;
841 ptr.func = palinfo_entries[f->func_id].proc_read;
842 ptr.page = page;
843 ptr.ret = 0; /* just in case */
846 /* will send IPI to other CPU and wait for completion of remote call */
847 if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
848 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
849 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
850 return 0;
852 return ptr.ret;
854 #else /* ! CONFIG_SMP */
855 static
856 int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
858 printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
859 return 0;
861 #endif /* CONFIG_SMP */
864 * Entry point routine: all calls go through this function
866 static int
867 palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
869 int len=0;
870 pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;
873 * in SMP mode, we may need to call another CPU to get correct
874 * information. PAL, by definition, is processor specific
876 if (f->req_cpu == get_cpu())
877 len = (*palinfo_entries[f->func_id].proc_read)(page);
878 else
879 len = palinfo_handle_smp(f, page);
881 put_cpu();
883 if (len <= off+count) *eof = 1;
885 *start = page + off;
886 len -= off;
888 if (len>count) len = count;
889 if (len<0) len = 0;
891 return len;
894 static void
895 create_palinfo_proc_entries(unsigned int cpu)
897 # define CPUSTR "cpu%d"
899 pal_func_cpu_u_t f;
900 struct proc_dir_entry **pdir;
901 struct proc_dir_entry *cpu_dir;
902 int j;
903 char cpustr[sizeof(CPUSTR)];
907 * we keep track of created entries in a depth-first order for
908 * cleanup purposes. Each entry is stored into palinfo_proc_entries
910 sprintf(cpustr,CPUSTR, cpu);
912 cpu_dir = proc_mkdir(cpustr, palinfo_dir);
914 f.req_cpu = cpu;
917 * Compute the location to store per cpu entries
918 * We dont store the top level entry in this list, but
919 * remove it finally after removing all cpu entries.
921 pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
922 *pdir++ = cpu_dir;
923 for (j=0; j < NR_PALINFO_ENTRIES; j++) {
924 f.func_id = j;
925 *pdir = create_proc_read_entry(
926 palinfo_entries[j].name, 0, cpu_dir,
927 palinfo_read_entry, (void *)f.value);
928 if (*pdir)
929 (*pdir)->owner = THIS_MODULE;
930 pdir++;
934 static void
935 remove_palinfo_proc_entries(unsigned int hcpu)
937 int j;
938 struct proc_dir_entry *cpu_dir, **pdir;
940 pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
941 cpu_dir = *pdir;
942 *pdir++=NULL;
943 for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
944 if ((*pdir)) {
945 remove_proc_entry ((*pdir)->name, cpu_dir);
946 *pdir ++= NULL;
950 if (cpu_dir) {
951 remove_proc_entry(cpu_dir->name, palinfo_dir);
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug notifier: mirror CPU arrival/departure in /proc/pal.
 */
static int palinfo_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		create_palinfo_proc_entries(hotcpu);
		break;
	case CPU_DEAD:
		remove_palinfo_proc_entries(hotcpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};
#endif
979 static int __init
980 palinfo_init(void)
982 int i = 0;
984 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
985 palinfo_dir = proc_mkdir("pal", NULL);
987 /* Create palinfo dirs in /proc for all online cpus */
988 for_each_online_cpu(i) {
989 create_palinfo_proc_entries(i);
992 /* Register for future delivery via notify registration */
993 register_hotcpu_notifier(&palinfo_cpu_notifier);
995 return 0;
998 static void __exit
999 palinfo_exit(void)
1001 int i = 0;
1003 /* remove all nodes: depth first pass. Could optimize this */
1004 for_each_online_cpu(i) {
1005 remove_palinfo_proc_entries(i);
1009 * Remove the top level entry finally
1011 remove_proc_entry(palinfo_dir->name, NULL);
1014 * Unregister from cpu notifier callbacks
1016 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1019 module_init(palinfo_init);
1020 module_exit(palinfo_exit);