/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 *	   Carl Love <carll@us.ibm.com>
 *	   Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/rtas.h>
#include <asm/system.h>

#include "../platforms/cell/interrupt.h"

#define PPU_CYCLES_EVENT_NUM 1		/* event number for CYCLES */
#define CBE_COUNT_ALL_CYCLES 0x42800000	/* PPU cycle event specifier */

#define NUM_THREADS 2		/* number of physical threads in
				 * a physical processor
				 */
#define NUM_TRACE_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
	u16 signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

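/* Illustrative decoding example (editorial note, not in the original source):
 * a hypothetical unit_mask of 0x00002131 would decode under the macros above
 * as sub_unit = 2, bus_word = 0x3, bus_type = 1, polarity = 0,
 * count_cycles = 1 and input_control = 0.
 */
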
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/* Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4,...
 * 1 - odd virtual cpus 1, 3, 5, ...
 */
static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;

/* pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static spinlock_t virt_cntr_lock = SPIN_LOCK_UNLOCKED;

static u32 ctr_enabled;

static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
		       void *address, unsigned long length)
{
	u64 paddr = __pa(address);

	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc, passthru,
			 paddr >> 32, paddr & 0xffffffff, length);
}

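/* Editorial note (not in the original source): the 64-bit physical address
 * is passed to RTAS as two 32-bit arguments, high word first.  For example,
 * a hypothetical paddr of 0x0000000123456700 is passed as 0x00000001 and
 * 0x23456700.
 */
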
static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/* The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments.  If we don't
	 * supply a valid entry, we must ignore all return values.  Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (ret)
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
}

static void pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret;
	int j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	for (j = 0; j < count; j++) {
		/* fw expects physical cpu # */
		pm_signal_local[j].cpu = node;
		pm_signal_local[j].signal_group = pm_signal[j].signal_group;
		pm_signal_local[j].bus_word = pm_signal[j].bus_word;
		pm_signal_local[j].sub_unit = pm_signal[j].sub_unit;
		pm_signal_local[j].bit = pm_signal[j].bit;
	}

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
				     pm_signal_local,
				     count * sizeof(struct pm_signal));

	if (ret)
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int i, j;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = 21;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		/* Counting one specific event */
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = (unit_mask & 0x0000f000) >> 12;

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/* Some of the islands signal selection is based on 64-bit words.
	 * The debug bus words are 32 bits, and the input words to the
	 * performance counters are defined as 32 bits.  We need to convert
	 * the 64-bit island specification to the appropriate 32-bit input
	 * bit and bus word for the performance counter event selection.
	 * See the CELL Performance monitoring signals manual and the Perf
	 * cntr hardware descriptions for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0xc;
			else if (bus_word == 0xc)
				bus_word = 0x3;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
			    (bus_type << (31 - (2 * i) + 1));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
					    (i << (31 - i));
					break;
				}
			}
		}
	}
out:
	;
}

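/* Illustrative example (editorial note, not in the original source): for a
 * hypothetical non-cycles event code of 2119, set_pm_event() above derives
 * signal_group = 2119 / 100 = 21 and signal_bit = 2119 % 100 = 19; the
 * unit_mask then supplies the bus word, bus type, polarity, count-cycles
 * and input-control fields via the GET_* macros.
 */
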
static void write_pm_cntrl(int cpu)
{
	/* Oprofile will use 32 bit counters, set bits 7:10 to 0.
	 * pm_regs.pm_cntrl is a global.
	 */
	u32 val = 0;

	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode == 1)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	/* Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
	/* The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global.
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}

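/* Editorial summary of the mapping implemented above (not in the original
 * source):
 *   kernel=1, user=1 -> CBE_COUNT_ALL_MODES
 *   kernel=1, user=0 -> CBE_COUNT_SUPERVISOR_MODE
 *   kernel=0, user=1 -> CBE_COUNT_PROBLEM_MODE
 *   kernel=0, user=0 -> CBE_COUNT_HYPERVISOR_MODE
 */
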
static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node.  There are
 * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs.  The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'.  We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 */
static void cell_virtual_cntr(unsigned long data)
{
	/* This routine will alternate loading the virtual counters for
	 * virtual CPUs.
	 */
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/* Make sure that the interrupt handler and
	 * the virt counter are not both playing with
	 * the counters on the same node.
	 */
	spin_lock_irqsave(&virt_cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	for (i = 0; i < num_counters; i++)
		/* There are some per thread events.  Must do the
		 * set event, for the thread that is being started.
		 */
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/* The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/* stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
			    = cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted.  This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value.  If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated.  Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/* Switch to the other thread.  Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/* There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&virt_cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

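/* Editorial note (not in the original source): for a node whose two virtual
 * CPUs are, say, 0 and 1, the routine above saves the outgoing thread's
 * counts into per_cpu(pmc_values, 0 + prev_hdw_thread) and restores the
 * incoming thread's counts from per_cpu(pmc_values, 0 + next_hdw_thread),
 * so each even/odd pair of per-cpu arrays provides the per-node storage
 * described in the comment block before cell_virtual_cntr().
 */
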
static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}

/* This function is called once for all cpus combined */
static void
cell_reg_setup(struct op_counter_config *ctr,
	       struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	pm_rtas_token = rtas_token("ibm,cbe-perftools");
	if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
		       __FUNCTION__);
		return;
	}

	num_counters = num_ctrs;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* setup the pm_control register */
	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {
		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/* Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

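	/* Illustrative mapping examples (editorial note, derived from the
	 * code above): thread 0 event 2100 maps to thread 1 event 2119,
	 * event 2203 stays 2203, and event 2205 maps to 2221; events
	 * outside these ranges are used unchanged on thread 1.
	 */
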
	/* Mark all trace/input bus words as unused (0xff) */
	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
		trace_bus[i] = 0xff;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/* Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow.  So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;

			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

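	/* Worked example (editorial note, not in the original source): for a
	 * hypothetical ctr[i].count of 100000, reset_value[i] becomes
	 * 0xFFFFFFFF - 100000 = 0xFFFE795F, so the 32-bit counter overflows
	 * and raises its interrupt after roughly 100000 counted events.
	 */
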
	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}
}

/* This function is called once for each cpu */
static void cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return;

	if (pm_rtas_token == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_WARNING "%s: RTAS_UNKNOWN_SERVICE\n",
		       __FUNCTION__);
		return;
	}

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_interval, 0);
	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}

static void cell_global_start(struct op_counter_config *ctr)
{
	u32 cpu;
	u32 interrupt_mask = 0;
	int i;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |=
				    CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;

	/* NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU.  We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();
}

static void cell_global_stop(void)
{
	int cpu;

	/* This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void
cell_handle_interrupt(struct pt_regs *regs, struct op_counter_config *ctr)
{
	u32 cpu;
	unsigned long pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/* Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time.  See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&virt_cntr_lock, flags);

	/* Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware.  This
	 * is hardware specific.
	 */
	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/* If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt.  When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xfffffff0 to cause the interrupt to be regenerated.
	 */
	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_pc(pc, is_kernel, i);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/* The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine, the virtual counter routine
		 * may have cleared the interrupts.  Hence we must use the
		 * virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/* The writes to the various performance counters only write
		 * to a latch.  The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled.  In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&virt_cntr_lock, flags);
}

struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.handle_interrupt = cell_handle_interrupt,
};