/*
 * Netburst Performance Events (P4, old Xeon)
 *
 *  Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 *  Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 *  For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with HT enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;			/* Event code and ESCR selector */
	unsigned int escr_msr[2];		/* ESCR MSR for this event */
	char cntr[2][P4_CNTR_LIMIT];		/* counter index (offset), -1 on absence */
};
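/*
 * A minimal reading example (matching the p4_event_bind_map entries
 * below): for P4_EVENT_TC_DELIVER_MODE the cntr field is
 * { {4, 5, -1}, {6, 7, -1} }, i.e. HT thread 0 may be scheduled on
 * counter 4 or 5, HT thread 1 on counter 6 or 7, and the remaining
 * -1 slots are unused.
 */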
struct p4_cache_event_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};
#define P4_GEN_CACHE_EVENT_BIND(name)		\
	[P4_CACHE__##name] = {			\
		.metric_pebs = P4_PEBS__##name,	\
		.metric_vert = P4_VERT__##name,	\
	}

static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
	P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
};
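/*
 * The bound metric_pebs/metric_vert values are written into
 * MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT respectively when
 * such a cache event gets enabled, see p4_pmu_enable_event() below.
 */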
/*
 * Note that we don't use CCCR1 here; there is an
 * exception for P4_BSQ_ALLOCATION but we simply have
 * no workaround for it.
 *
 * Consider this binding as resources which a particular
 * event may borrow; it doesn't contain EventMask,
 * Tags and friends -- they are left to a caller.
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};
#define P4_GEN_CACHE_EVENT(event, bit, cache_event)			  \
	p4_config_pack_escr(P4_ESCR_EVENT(event)			| \
			    P4_ESCR_EMASK_BIT(event, bit))		| \
	p4_config_pack_cccr(cache_event					| \
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))
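/*
 * In other words, P4_GEN_CACHE_EVENT() yields a complete u64 config:
 * the ESCR half carries the event code plus the requested event-mask
 * bit, while the CCCR half carries the cache-event index (resolved
 * later through p4_cache_event_bind_map) and the ESCR select matching
 * the opcode.
 */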
static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_CACHE__itlb_reference_hit),
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_CACHE__itlb_reference_miss),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

	/*
	 * retired instructions:
	 * for the sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	/* cache hits */
	[PERF_COUNT_HW_CACHE_REFERENCES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	/* cache misses */
	[PERF_COUNT_HW_CACHE_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV or #DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
		p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};
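/*
 * Note these templates carry only the ESCR half (plus CCCR flags for
 * the bus-cycles entry); the CCCR ESCR-select bits are merged in by
 * p4_pmu_event_map() below, derived from the event opcode.
 */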
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
	unsigned int evnt = p4_config_unpack_event(config);
	struct p4_event_bind *bind = NULL;

	if (evnt < ARRAY_SIZE(p4_event_bind_map))
		bind = &p4_event_bind_map[evnt];

	return bind;
}
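/*
 * Map a generic hardware event onto a complete P4 config: start from
 * the p4_general_events template and merge in the CCCR ESCR-select
 * field taken from the event's bound opcode.
 */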
static u64 p4_pmu_event_map(int hw_event)
{
	struct p4_event_bind *bind;
	unsigned int esel;
	u64 config;

	config = p4_general_events[hw_event];
	bind = p4_config_get_bind(config);
	esel = P4_OPCODE_ESEL(bind->opcode);
	config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

	return config;
}
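/*
 * Build and validate event->hw.config for the current cpu: raw events
 * are bounds-checked against the bind map and masked down to the
 * HT-compatible ESCR/CCCR bits only.
 */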
static int p4_hw_config(struct perf_event *event)
{
	int cpu = get_cpu();
	int rc = 0;
	unsigned int evnt;
	u32 escr, cccr;

	/*
	 * the reason we take cpu this early is that if we get scheduled
	 * for the first time on the same cpu, we will not need to swap
	 * thread-specific flags in the config (and will save some cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type == PERF_TYPE_RAW) {

		/* user data may have an out-of-bound event index */
		evnt = p4_config_unpack_event(event->attr.config);
		if (evnt >= ARRAY_SIZE(p4_event_bind_map)) {
			rc = -EINVAL;
			goto out;
		}

		/*
		 * We don't control raw events so it's up to the caller
		 * to pass sane values (and we don't count the thread number
		 * on an HT machine but allow HT-compatible specifics to be
		 * passed on)
		 *
		 * XXX: HT wide things should check perf_paranoid_cpu() &&
		 *	CAP_SYS_ADMIN
		 */
		event->hw.config |= event->attr.config &
			(p4_config_pack_escr(P4_ESCR_MASK_HT) |
			 p4_config_pack_cccr(P4_CCCR_MASK_HT));
	}

	rc = x86_setup_perfctr(event);
out:
	put_cpu();
	return rc;
}
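/*
 * Check a counter's CCCR for an overflow: either P4_CCCR_OVF is set,
 * or the high-bit test below catches an unflagged overflow of the
 * negatively-preloaded counter. Clears the OVF flag and returns
 * non-zero when an overflow was caught.
 */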
static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	int overflow = 0;
	u32 low, high;

	rdmsr(hwc->config_base + hwc->idx, low, high);

	/* we need to check the high bit for unflagged overflows */
	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
		overflow = 1;
		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			((u64)low) & ~P4_CCCR_OVF);
	}

	return overflow;
}
static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If the event gets disabled while the counter is in an overflowed
	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
	 * asserted again and again.
	 */
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
static void p4_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_disable_event(event);
	}
}
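/*
 * Program one event: write the (possibly thread-swapped) ESCR, set up
 * the PEBS metric MSRs if this is a cache event, then arm the CCCR
 * with P4_CCCR_ENABLE.
 */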
static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
	struct p4_event_bind *bind;
	struct p4_cache_event_bind *bind_cache;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we don't support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be a cache event, in which case we need to store
	 * the metrics into the additional MSRs
	 */
	BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
	if (idx_cache > P4_CACHE__NONE &&
	    idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
		bind_cache = &p4_cache_event_bind_map[idx_cache];
		(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
		(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
	}

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
static void p4_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_enable_event(event);
	}
}
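/*
 * PMI handler: walk the active counters, pick up both flagged and
 * unflagged overflows, re-arm the period and hand the sample over to
 * the generic overflow path.
 */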
static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/* it might be an unflagged overflow */
		handled = p4_pmu_clear_cccr_ovf(hwc);

		val = x86_perf_event_update(event);
		if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1))))
			continue;

		/* event overflow for sure */
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;
		if (perf_event_overflow(event, 1, &data, regs))
			p4_pmu_disable_event(event);
	}

	if (handled) {
		/* p4 quirk: unmask it again */
		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		inc_irq_stat(apic_perf_irqs);
	}

	return handled;
}
/*
 * swap thread specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * either we are lucky and continue on the same cpu,
	 * or there is no HT support at all
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event was migrated from another logical
	 * cpu, so we need to swap thread specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}
/*
 * ESCR address hashing is tricky: ESCRs are not sequential in memory,
 * but they all live in the narrow MSR range that starts at
 * MSR_P4_BSU_ESCR0 (0x3a0) and ends at 0x3e1, so a direct-mapped table
 * indexed by (msr - base) comes out roughly 70% filled.
 */

#define P4_ESCR_MSR_BASE		0x000003a0
#define P4_ESCR_MSR_MAX			0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE		(P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)		(msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)	[P4_ESCR_MSR_IDX(msr)] = msr
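/*
 * Example: MSR_P4_BSU_ESCR0 is the base itself, so
 * P4_ESCR_MSR_IDX(MSR_P4_BSU_ESCR0) == 0, and the largest valid index
 * is P4_ESCR_MSR_IDX(P4_ESCR_MSR_MAX) == 0x41, which is exactly
 * P4_ESCR_MSR_TABLE_SIZE - 1.
 */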
static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};
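/*
 * Translate an ESCR MSR address into its index in p4_escr_table,
 * warning once on any address that doesn't hash to a known ESCR.
 */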
static int p4_get_escr_idx(unsigned int addr)
{
	unsigned int idx = P4_ESCR_MSR_IDX(addr);

	if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE	||
			!p4_escr_table[idx]		||
			p4_escr_table[idx] != addr)) {
		WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
		return -1;
	}

	return idx;
}
static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j != -1 && !test_bit(j, used_mask))
			return j;
	}

	return -1;
}
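/*
 * Assign hardware counters to the n queued events: each event needs
 * both a free counter from its bind's per-thread list and exclusive
 * ownership of the matching ESCR, tracked via two local bitmaps.
 */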
static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
	int cpu = smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);

	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
		if (unlikely(escr_idx == -1))
			goto done;

		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
			goto done;

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * If HT is disabled we may need to use all the
	 * ARCH_P4_MAX_CCCR counters simultaneously; leave it
	 * restricted for the moment, assuming HT is on.
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.apic			= 1,
	.cntval_bits		= 40,
	.cntval_mask		= (1ULL << 40) - 1,
	.max_period		= (1ULL << 39) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
};
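/*
 * Bit 7 of MSR_IA32_MISC_ENABLE is documented as the architectural
 * "performance monitoring available" flag; the check below refuses
 * Netburst models which don't advertise a usable PMU.
 */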
static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If we get stripped -- indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */