/*
 * Netburst Performance Events (P4, old Xeon)
 *
 *  Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 *  Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;			/* Event code and ESCR selector */
	unsigned int escr_msr[2];		/* ESCR MSR for this event */
	char cntr[2][P4_CNTR_LIMIT];		/* counter index (offset), -1 on absence */
};

struct p4_pebs_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};
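/*
 * Editor's note (illustrative, not in the original source): with HT
 * enabled, thread 0 of an event picks counters from cntr[0][] and
 * thread 1 from cntr[1][]; e.g. a binding of { {4, 5, -1}, {6, 7, -1} }
 * lets thread 0 use counter 4 or 5 and thread 1 use counter 6 or 7,
 * with -1 marking an absent slot (see p4_next_cntr() below).
 */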
/* it sets P4_PEBS_ENABLE_UOP_TAG as well */
#define P4_GEN_PEBS_BIND(name, pebs, vert)			\
	[P4_PEBS_METRIC__##name] = {				\
		.metric_pebs = pebs | P4_PEBS_ENABLE_UOP_TAG,	\
		.metric_vert = vert,				\
	}

/*
 * note we have P4_PEBS_ENABLE_UOP_TAG always set here
 *
 * it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
 * event configuration to find out which values are to be
 * written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
 * registers
 */
static struct p4_pebs_bind p4_pebs_bind_map[] = {
	P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired,	0x0000001, 0x0000001),
	P4_GEN_PEBS_BIND(2ndl_cache_load_miss_retired,	0x0000002, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_load_miss_retired,	0x0000004, 0x0000001),
	P4_GEN_PEBS_BIND(dtlb_store_miss_retired,	0x0000004, 0x0000002),
	P4_GEN_PEBS_BIND(dtlb_all_miss_retired,		0x0000004, 0x0000003),
	P4_GEN_PEBS_BIND(tagged_mispred_branch,		0x0018000, 0x0000010),
	P4_GEN_PEBS_BIND(mob_load_replay_retired,	0x0000200, 0x0000001),
	P4_GEN_PEBS_BIND(split_load_retired,		0x0000400, 0x0000001),
	P4_GEN_PEBS_BIND(split_store_retired,		0x0000400, 0x0000002),
};
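/*
 * For illustration only: the first entry above expands roughly to
 *
 *	[P4_PEBS_METRIC__1stl_cache_load_miss_retired] = {
 *		.metric_pebs = 0x0000001 | P4_PEBS_ENABLE_UOP_TAG,
 *		.metric_vert = 0x0000001,
 *	},
 *
 * i.e. the pebs value is what gets written to MSR_IA32_PEBS_ENABLE and
 * the vert value to MSR_P4_PEBS_MATRIX_VERT (see p4_pmu_enable_pebs()).
 */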
/*
 * Note that we don't use CCCR1 here, there is an
 * exception for P4_BSQ_ALLOCATION but we just have
 * no workaround
 *
 * consider this binding as resources which particular
 * event may borrow, it doesn't contain EventMask,
 * Tags and friends -- they are left to a caller
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};
#define P4_GEN_CACHE_EVENT(event, bit, metric)				  \
	p4_config_pack_escr(P4_ESCR_EVENT(event)			| \
			    P4_ESCR_EMASK_BIT(event, bit))		| \
	p4_config_pack_cccr(metric					| \
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
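/*
 * Editor's note (a sketch of the layout, not from the original source):
 * the macro yields a single u64 config whose ESCR half carries the
 * event code plus one event-mask bit and whose CCCR half carries the
 * PEBS metric index (kept in P4_CCCR_RESERVED bits) plus the ESCR
 * select matching the event's opcode.
 */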
static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_PEBS_METRIC__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_PEBS_METRIC__none),
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_PEBS_METRIC__none),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
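/*
 * Editor's note (assumption about the generic perf semantics): a -1
 * entry marks an op/result combination that is invalid on this PMU,
 * while a 0x0 entry marks one that is simply not available; the
 * generic x86 code rejects them accordingly.
 */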
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

	/*
	 * retired instructions
	 * for the sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	[PERF_COUNT_HW_CACHE_REFERENCES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	[PERF_COUNT_HW_CACHE_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV\#DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
	p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)		|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)	|
		P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
	p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
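/*
 * For illustration only: p4_pmu_event_map() below completes these
 * packed ESCR values, e.g. for PERF_COUNT_HW_CPU_CYCLES it looks up
 * the P4_EVENT_GLOBAL_POWER_EVENTS binding and ORs in
 * p4_config_pack_cccr(P4_CCCR_ESEL(esel)) so the CCCR selects the
 * proper ESCR.
 */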
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
	unsigned int evnt = p4_config_unpack_event(config);
	struct p4_event_bind *bind = NULL;

	if (evnt < ARRAY_SIZE(p4_event_bind_map))
		bind = &p4_event_bind_map[evnt];

	return bind;
}
static u64 p4_pmu_event_map(int hw_event)
{
	struct p4_event_bind *bind;
	unsigned int esel;
	u64 config;

	config = p4_general_events[hw_event];
	bind = p4_config_get_bind(config);
	esel = P4_OPCODE_ESEL(bind->opcode);
	config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

	return config;
}
static int p4_validate_raw_event(struct perf_event *event)
{
	unsigned int v;

	/* user data may have an out-of-bounds event index */
	v = p4_config_unpack_event(event->attr.config);
	if (v >= ARRAY_SIZE(p4_event_bind_map)) {
		pr_warning("P4 PMU: Unknown event code: %d\n", v);
		return -EINVAL;
	}

	/*
	 * it may have some screwed PEBS bits
	 */
	if (p4_config_pebs_has(event->attr.config, P4_PEBS_CONFIG_ENABLE)) {
		pr_warning("P4 PMU: PEBS are not supported yet\n");
		return -EINVAL;
	}
	v = p4_config_unpack_metric(event->attr.config);
	if (v >= ARRAY_SIZE(p4_pebs_bind_map)) {
		pr_warning("P4 PMU: Unknown metric code: %d\n", v);
		return -EINVAL;
	}

	return 0;
}
static int p4_hw_config(struct perf_event *event)
{
	int cpu = get_cpu();
	int rc = 0;
	u32 escr, cccr;

	/*
	 * the reason we use cpu so early is that if we get scheduled
	 * for the first time on the same cpu, we will not need to swap
	 * thread-specific flags in config (and will save some cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					 event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type == PERF_TYPE_RAW) {

		rc = p4_validate_raw_event(event);
		if (rc)
			goto out;

		/*
		 * We don't control raw events so it's up to the caller
		 * to pass sane values (and we don't count the thread number
		 * on HT machine but allow HT-compatible specifics to be
		 * passed on)
		 *
		 * Note that for RAW events we allow user to use P4_CCCR_RESERVED
		 * bits since we keep additional info here (for cache events and etc)
		 *
		 * XXX: HT wide things should check perf_paranoid_cpu() &&
		 *	CAP_SYS_ADMIN
		 */
		event->hw.config |= event->attr.config &
			(p4_config_pack_escr(P4_ESCR_MASK_HT) |
			 p4_config_pack_cccr(P4_CCCR_MASK_HT | P4_CCCR_RESERVED));
	}

	rc = x86_setup_perfctr(event);
out:
	put_cpu();
	return rc;
}
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	int overflow = 0;
	u32 low, high;

	rdmsr(hwc->config_base + hwc->idx, low, high);

	/* we need to check high bit for unflagged overflows */
	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
		overflow = 1;
		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			((u64)low) & ~P4_CCCR_OVF);
	}

	return overflow;
}
static void p4_pmu_disable_pebs(void)
{
	/*
	 * It's still allowed that two threads setup same cache
	 * events so we can't simply clear metrics until we know
	 * no one is depending on us, so we need kind of counter
	 * for "ReplayEvent" users.
	 *
	 * What is more complex -- RAW events, if user (for some
	 * reason) will pass some cache event metric with an improper
	 * event opcode -- it's fine from hardware point of view
	 * but complete nonsense from the "meaning" of such action.
	 *
	 * So for the moment let's leave metrics turned on forever -- it's
	 * ok for now but needs to be revisited!
	 *
	 * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
	 * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
	 */
}
static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If the event gets disabled while the counter is in an overflowed
	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
	 * asserted again and again
	 */
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
static void p4_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_disable_event(event);
	}

	p4_pmu_disable_pebs();
}
/* configuration must be valid */
static void p4_pmu_enable_pebs(u64 config)
{
	struct p4_pebs_bind *bind;
	unsigned int idx;

	BUILD_BUG_ON(P4_PEBS_METRIC__max > P4_PEBS_CONFIG_METRIC_MASK);

	idx = p4_config_unpack_metric(config);
	if (idx == P4_PEBS_METRIC__none)
		return;

	bind = &p4_pebs_bind_map[idx];

	(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
	(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	struct p4_event_bind *bind;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we don't support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be a Cache event so we need to write metrics
	 * into additional MSRs
	 */
	p4_pmu_enable_pebs(hwc->config);

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
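/*
 * Editor's note (summary of the ordering above, not in the original
 * source): enabling is a three-step write -- PEBS metric MSRs first
 * (if any), then the per-thread ESCR with the real event code, and
 * only then the CCCR with P4_CCCR_ENABLE set, so the counter starts
 * counting after its routing is fully programmed.
 */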
static void p4_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_enable_event(event);
	}
}
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/* it might be unflagged overflow */
		handled = p4_pmu_clear_cccr_ovf(hwc);

		val = x86_perf_event_update(event);
		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
			continue;

		/* event overflow for sure */
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;
		if (perf_event_overflow(event, 1, &data, regs))
			p4_pmu_disable_event(event);
	}

	if (handled) {
		/* p4 quirk: unmask it again */
		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		inc_irq_stat(apic_perf_irqs);
	}

	return handled;
}
/*
 * swap thread specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * either we are lucky and continue on the same cpu, or there is
	 * no HT support
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event was migrated from another logical
	 * cpu, so we need to swap thread-specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}
/*
 * ESCR address hashing is tricky: ESCRs are not sequential
 * in memory but all start from MSR_P4_BSU_ESCR0 (0x3a0), and
 * the low byte of any ESCR address lies in the range [0xa0, 0xe1],
 * so we get a hashtable that is ~70% filled
 */

#define P4_ESCR_MSR_BASE		0x000003a0
#define P4_ESCR_MSR_MAX			0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE		(P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)		(msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)	[P4_ESCR_MSR_IDX(msr)] = msr
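/*
 * Editor's note (illustrative): this is a perfect-hash lookup -- an
 * ESCR MSR at address 0x3a0 + n lands in slot n, so p4_escr_table[n]
 * either holds that address or stays zero for the holes in the MSR
 * range, which is exactly what p4_get_escr_idx() checks below.
 */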
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};
static int p4_get_escr_idx(unsigned int addr)
{
	unsigned int idx = P4_ESCR_MSR_IDX(addr);

	if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE	||
			!p4_escr_table[idx]		||
			p4_escr_table[idx] != addr)) {
		WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
		return -1;
	}

	return idx;
}
static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j != -1 && !test_bit(j, used_mask))
			return j;
	}

	return -1;
}
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
	int cpu = smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);

	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
		if (unlikely(escr_idx == -1))
			goto done;

		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
			goto done;

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}
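/*
 * Editor's note (summary, not from the original source): an event is
 * schedulable only if one of its bound counters is still free and its
 * ESCR has not been claimed by another event in this pass; "num"
 * counts the events left unassigned, hence the -ENOSPC above.
 */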
static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * If HT is disabled we may need to use all the
	 * ARCH_P4_MAX_CCCR counters simultaneously, though
	 * we leave it restricted for the moment assuming
	 * HT is on
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.apic			= 1,
	.cntval_bits		= 40,
	.cntval_mask		= (1ULL << 40) - 1,
	.max_period		= (1ULL << 39) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
	/*
	 * This handles erratum N15 in intel doc 249199-029,
	 * the counter may not be updated correctly on write
	 * so we need a second write operation to do the trick
	 * (the official workaround didn't work)
	 *
	 * the former idea is taken from OProfile code
	 */
	.perfctr_second_write	= 1,
};
static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If we get stripped -- indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}