1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 /***************************************************************************
4 * Copyright (C) 2007 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 ***************************************************************************/
/* Human-readable names for the cached ETB registers; indexed by the same
 * order as the ETB_* register-index constants used elsewhere in this file.
 * NOTE(review): this chunk is a garbled extraction — statements are split
 * across lines and several original lines (other table entries and the
 * closing brace) are missing from view; code left byte-identical. */
17 static const char * const etb_reg_list
[] = {
23 "ETB_ram_read_pointer",
24 "ETB_ram_write_pointer",
25 "ETB_trigger_counter",
/* Forward declaration: cache-refresh hook for a single ETB register
 * (defined below; wired into struct reg_arch_type elsewhere in the file). */
29 static int etb_get_reg(struct reg
*reg
);
/* Queue an IR scan that loads new_instr into the ETB TAP's instruction
 * register; the scan is skipped when the TAP's current instruction already
 * equals new_instr (cheap de-dup before touching the JTAG queue).
 * NOTE(review): garbled extraction — several original lines are missing
 * (presumably the tap local, free(t) and the return); code left byte-identical.
 * The calloc() result is used unchecked — TODO confirm the missing lines
 * don't add a NULL check. */
31 static int etb_set_instr(struct etb
*etb
, uint32_t new_instr
)
39 if (buf_get_u32(tap
->cur_instr
, 0, tap
->ir_length
) != new_instr
) {
40 struct scan_field field
;
42 field
.num_bits
= tap
->ir_length
;
/* scratch buffer sized to the IR length, rounded up to whole bytes */
43 void *t
= calloc(DIV_ROUND_UP(field
.num_bits
, 8), 1);
45 buf_set_u32(t
, 0, field
.num_bits
, new_instr
);
/* capture nothing on the way out */
47 field
.in_value
= NULL
;
49 jtag_add_ir_scan(tap
, &field
, TAP_IDLE
);
/* Select an ETB scan chain: queue a DR scan carrying new_scan_chain, then
 * record it in etb->cur_scan_chain so repeat selections are skipped.
 * NOTE(review): garbled extraction with missing lines — field.num_bits is
 * read by the visible calloc/buf_set_u32 calls but its initialization is not
 * in view (presumably on a missing line); the 0x2 written by etb_set_instr
 * looks like a SCAN_N-style chain-select instruction despite the "INTEST"
 * comment — TODO confirm against the original file. Code left byte-identical. */
57 static int etb_scann(struct etb
*etb
, uint32_t new_scan_chain
)
59 if (etb
->cur_scan_chain
!= new_scan_chain
) {
60 struct scan_field field
;
63 void *t
= calloc(DIV_ROUND_UP(field
.num_bits
, 8), 1);
65 buf_set_u32(t
, 0, field
.num_bits
, new_scan_chain
);
67 field
.in_value
= NULL
;
69 /* select INTEST instruction */
70 etb_set_instr(etb
, 0x2);
71 jtag_add_dr_scan(etb
->tap
, 1, &field
, TAP_IDLE
);
/* remember the active chain so the next call can early-out */
73 etb
->cur_scan_chain
= new_scan_chain
;
/* Forward declarations for the register read-with-check and the
 * write-then-execute helpers defined later in this file. */
81 static int etb_read_reg_w_check(struct reg
*, uint8_t *, uint8_t *);
82 static int etb_set_reg_w_exec(struct reg
*, uint8_t *);
/* Queue a read of one ETB register with no value/mask checking —
 * thin wrapper over etb_read_reg_w_check(reg, NULL, NULL). */
84 static int etb_read_reg(struct reg
*reg
)
86 return etb_read_reg_w_check(reg
, NULL
, NULL
);
/* reg_arch_type .get hook: schedule a read of one ETB register, then run the
 * JTAG queue so reg->value is current on return. Both the scheduling step
 * and the queue execution are checked and logged separately.
 * NOTE(review): garbled extraction — the retval declaration, error returns
 * and closing braces are on missing lines; code left byte-identical. */
89 static int etb_get_reg(struct reg
*reg
)
93 retval
= etb_read_reg(reg
);
94 if (retval
!= ERROR_OK
) {
95 LOG_ERROR("BUG: error scheduling ETB register read");
99 retval
= jtag_execute_queue();
100 if (retval
!= ERROR_OK
) {
101 LOG_ERROR("ETB register read failed");
/* Register access vtable for ETB registers; only the .set member is visible
 * here (.get presumably points at etb_get_reg on a missing line — confirm). */
108 static const struct reg_arch_type etb_reg_type
= {
110 .set
= etb_set_reg_w_exec
,
/* Build the register cache for one ETB: allocates the cache header plus
 * parallel arrays of struct reg and struct etb_reg (arch_info), and fills in
 * each register's name/size/value/type. Each register's ETB address is its
 * array index. Public entry point (no 'static').
 * NOTE(review): garbled extraction — num_regs and i are declared on missing
 * lines, as is the return; code left byte-identical. The malloc/calloc
 * results are used unchecked; and .size is assigned 32 twice per register
 * (orig lines 134 and 140) — the second assignment is redundant. */
113 struct reg_cache
*etb_build_reg_cache(struct etb
*etb
)
115 struct reg_cache
*reg_cache
= malloc(sizeof(struct reg_cache
));
116 struct reg
*reg_list
= NULL
;
117 struct etb_reg
*arch_info
= NULL
;
121 /* the actual registers are kept in two arrays */
122 reg_list
= calloc(num_regs
, sizeof(struct reg
));
123 arch_info
= calloc(num_regs
, sizeof(struct etb_reg
));
125 /* fill in values for the reg cache */
126 reg_cache
->name
= "etb registers";
127 reg_cache
->next
= NULL
;
128 reg_cache
->reg_list
= reg_list
;
129 reg_cache
->num_regs
= num_regs
;
131 /* set up registers */
132 for (i
= 0; i
< num_regs
; i
++) {
133 reg_list
[i
].name
= etb_reg_list
[i
];
134 reg_list
[i
].size
= 32;
135 reg_list
[i
].dirty
= false;
136 reg_list
[i
].valid
= false;
/* 4 bytes of zeroed backing store for each 32-bit register value */
137 reg_list
[i
].value
= calloc(1, 4);
138 reg_list
[i
].arch_info
= &arch_info
[i
];
139 reg_list
[i
].type
= &etb_reg_type
;
140 reg_list
[i
].size
= 32;
/* ETB register address == cache index */
141 arch_info
[i
].addr
= i
;
142 arch_info
[i
].etb
= etb
;
/* Deferred JTAG callback: arg points at a 4-byte little-endian scan capture;
 * decode it in place into a host-order uint32_t.
 * NOTE(review): writing through (uint32_t *)arg over the same bytes read via
 * buf_get_u32 relies on the pointer being suitably aligned — presumably it
 * always targets an element of the uint32_t data[] array queued by
 * etb_read_ram; confirm against callers. */
148 static void etb_getbuf(jtag_callback_data_t arg
)
150 uint8_t *in
= (uint8_t *)arg
;
152 *((uint32_t *)arg
) = buf_get_u32(in
, 0, 32);
/* Burst-read num_frames words from ETB trace RAM into data[]. Each 40-bit DR
 * scan carries {32-bit data, 7-bit register address, 1-bit nR/W}; the address
 * stays at 0x4 (RAM data register) so every scan auto-advances the read
 * pointer, and the address is switched to 0 on the final scan so the data
 * register is not read one extra time. Decoding of each captured word is
 * deferred to the etb_getbuf callback.
 * NOTE(review): garbled extraction — i/temp1/temp2 declarations, the 'else'
 * before orig line 189 and the return are on missing lines; code left
 * byte-identical. */
155 static int etb_read_ram(struct etb
*etb
, uint32_t *data
, int num_frames
)
157 struct scan_field fields
[3];
/* 0xc presumably selects the ETB data access instruction — confirm */
161 etb_set_instr(etb
, 0xc);
163 fields
[0].num_bits
= 32;
164 fields
[0].out_value
= NULL
;
165 fields
[0].in_value
= NULL
;
167 fields
[1].num_bits
= 7;
169 fields
[1].out_value
= &temp1
;
170 buf_set_u32(&temp1
, 0, 7, 4);
171 fields
[1].in_value
= NULL
;
173 fields
[2].num_bits
= 1;
175 fields
[2].out_value
= &temp2
;
176 buf_set_u32(&temp2
, 0, 1, 0);
177 fields
[2].in_value
= NULL
;
/* priming scan: first data word is captured by the scans in the loop */
179 jtag_add_dr_scan(etb
->tap
, 3, fields
, TAP_IDLE
);
181 for (i
= 0; i
< num_frames
; i
++) {
182 /* ensure nR/W remains set to read */
183 buf_set_u32(&temp2
, 0, 1, 0);
185 /* address remains set to 0x4 (RAM data) until we read the last frame */
186 if (i
< num_frames
- 1)
187 buf_set_u32(&temp1
, 0, 7, 4);
189 buf_set_u32(&temp1
, 0, 7, 0);
191 fields
[0].in_value
= (uint8_t *)(data
+ i
);
192 jtag_add_dr_scan(etb
->tap
, 3, fields
, TAP_IDLE
);
/* convert the raw capture bytes to host order once the queue runs */
194 jtag_add_callback(etb_getbuf
, (jtag_callback_data_t
)(data
+ i
));
197 jtag_execute_queue();
/* Queue a read of one ETB register, optionally verifying the captured value
 * against check_value/check_mask. Two DR scans are queued: the first shifts
 * out the register address with nR/W=0 (read request), the second — with the
 * address switched to 0 (identification register) — captures the result, so
 * the target data register is only read once.
 * NOTE(review): garbled extraction — temp1/temp2 declarations and the return
 * are on missing lines; code left byte-identical. */
202 static int etb_read_reg_w_check(struct reg
*reg
,
203 uint8_t *check_value
, uint8_t *check_mask
)
205 struct etb_reg
*etb_reg
= reg
->arch_info
;
/* ETB register addresses are 7 bits wide */
206 uint8_t reg_addr
= etb_reg
->addr
& 0x7f;
207 struct scan_field fields
[3];
209 LOG_DEBUG("%i", (int)(etb_reg
->addr
));
211 etb_scann(etb_reg
->etb
, 0x0);
212 etb_set_instr(etb_reg
->etb
, 0xc);
214 fields
[0].num_bits
= 32;
215 fields
[0].out_value
= reg
->value
;
216 fields
[0].in_value
= NULL
;
217 fields
[0].check_value
= NULL
;
218 fields
[0].check_mask
= NULL
;
220 fields
[1].num_bits
= 7;
222 fields
[1].out_value
= &temp1
;
223 buf_set_u32(&temp1
, 0, 7, reg_addr
);
224 fields
[1].in_value
= NULL
;
225 fields
[1].check_value
= NULL
;
226 fields
[1].check_mask
= NULL
;
/* nR/W = 0: read access */
228 fields
[2].num_bits
= 1;
230 fields
[2].out_value
= &temp2
;
231 buf_set_u32(&temp2
, 0, 1, 0);
232 fields
[2].in_value
= NULL
;
233 fields
[2].check_value
= NULL
;
234 fields
[2].check_mask
= NULL
;
236 jtag_add_dr_scan(etb_reg
->etb
->tap
, 3, fields
, TAP_IDLE
);
238 /* read the identification register in the second run, to make sure we
239 * don't read the ETB data register twice, skipping every second entry
241 buf_set_u32(&temp1
, 0, 7, 0x0);
242 fields
[0].in_value
= reg
->value
;
243 fields
[0].check_value
= check_value
;
244 fields
[0].check_mask
= check_mask
;
/* checked variant: verifier compares capture against check_value/mask */
246 jtag_add_dr_scan_check(etb_reg
->etb
->tap
, 3, fields
, TAP_IDLE
);
/* Forward declaration: queue a write of a 32-bit value to one ETB register. */
251 static int etb_write_reg(struct reg
*, uint32_t);
/* Schedule a write of value to an ETB register and, on success, mirror the
 * value into the cached reg->value buffer (queue is NOT executed here).
 * NOTE(review): garbled extraction — retval declaration, valid/dirty flag
 * updates and return are on missing lines; code left byte-identical. */
253 static int etb_set_reg(struct reg
*reg
, uint32_t value
)
257 retval
= etb_write_reg(reg
, value
);
258 if (retval
!= ERROR_OK
) {
259 LOG_ERROR("BUG: error scheduling ETB register write");
/* keep the cached copy in sync with what was queued */
263 buf_set_u32(reg
->value
, 0, reg
->size
, value
);
/* reg_arch_type .set hook: decode the new value from buf, schedule the write
 * via etb_set_reg, then execute the JTAG queue immediately.
 * NOTE(review): garbled extraction — retval declaration, error return and
 * closing lines are missing from view; code left byte-identical. */
270 static int etb_set_reg_w_exec(struct reg
*reg
, uint8_t *buf
)
274 etb_set_reg(reg
, buf_get_u32(buf
, 0, reg
->size
));
276 retval
= jtag_execute_queue();
277 if (retval
!= ERROR_OK
) {
278 LOG_ERROR("ETB: register write failed");
/* Queue a write of value to one ETB register: a single 40-bit DR scan of
 * {32-bit value, 7-bit register address, nR/W=1}. Mirrors the layout used by
 * etb_read_reg_w_check, with the direction bit set to write.
 * NOTE(review): garbled extraction — temp0/temp1/temp2 declarations and the
 * return are on missing lines; code left byte-identical. */
284 static int etb_write_reg(struct reg
*reg
, uint32_t value
)
286 struct etb_reg
*etb_reg
= reg
->arch_info
;
287 uint8_t reg_addr
= etb_reg
->addr
& 0x7f;
288 struct scan_field fields
[3];
290 LOG_DEBUG("%i: 0x%8.8" PRIx32
"", (int)(etb_reg
->addr
), value
);
292 etb_scann(etb_reg
->etb
, 0x0);
293 etb_set_instr(etb_reg
->etb
, 0xc);
295 fields
[0].num_bits
= 32;
297 fields
[0].out_value
= temp0
;
298 buf_set_u32(temp0
, 0, 32, value
);
299 fields
[0].in_value
= NULL
;
301 fields
[1].num_bits
= 7;
303 fields
[1].out_value
= &temp1
;
304 buf_set_u32(&temp1
, 0, 7, reg_addr
);
305 fields
[1].in_value
= NULL
;
/* nR/W = 1: write access */
307 fields
[2].num_bits
= 1;
309 fields
[2].out_value
= &temp2
;
310 buf_set_u32(&temp2
, 0, 1, 1);
311 fields
[2].in_value
= NULL
;
313 jtag_add_dr_scan(etb_reg
->etb
->tap
, 3, fields
, TAP_IDLE
);
/* 'etb config <target> <tap>' command: validate the target exists and is an
 * ARM, resolve the named TAP, then allocate a struct etb and attach it to the
 * target's ETM as capture_driver_priv. cur_scan_chain starts at 0xffffffff so
 * the first etb_scann() always performs a real chain-select scan.
 * NOTE(review): garbled extraction — argument-count check, error returns,
 * several field initializations and the final return are on missing lines;
 * the malloc result is used unchecked. Code left byte-identical. */
318 COMMAND_HANDLER(handle_etb_config_command
)
320 struct target
*target
;
321 struct jtag_tap
*tap
;
325 return ERROR_COMMAND_SYNTAX_ERROR
;
327 target
= get_target(CMD_ARGV
[0]);
330 LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV
[0]);
334 arm
= target_to_arm(target
);
336 command_print(CMD
, "ETB: '%s' isn't an ARM", CMD_ARGV
[0]);
340 tap
= jtag_tap_by_string(CMD_ARGV
[1]);
342 command_print(CMD
, "ETB: TAP %s does not exist", CMD_ARGV
[1]);
347 struct etb
*etb
= malloc(sizeof(struct etb
));
349 arm
->etm
->capture_driver_priv
= etb
;
/* sentinel: force the first scan-chain select to actually happen */
352 etb
->cur_scan_chain
= 0xffffffff;
353 etb
->reg_cache
= NULL
;
357 LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
/* 'etb trigger_percent [percent]' command: with an argument, set how much of
 * the trace buffer should fill AFTER the trigger fires (clamped to 2..100 by
 * validation); with no argument, just report the current setting. Verifies
 * the current target is an ARM using the ETB capture driver first.
 * NOTE(review): garbled extraction — arm/etb/new_value declarations, argc
 * checks, error returns and the final return are on missing lines; code left
 * byte-identical. */
364 COMMAND_HANDLER(handle_etb_trigger_percent_command
)
366 struct target
*target
;
368 struct etm_context
*etm
;
371 target
= get_current_target(CMD_CTX
)
372 arm
= target_to_arm(target
);
374 command_print(CMD
, "ETB: current target isn't an ARM");
380 command_print(CMD
, "ETB: target has no ETM configured");
383 if (etm
->capture_driver
!= &etb_capture_driver
) {
384 command_print(CMD
, "ETB: target not using ETB");
387 etb
= arm
->etm
->capture_driver_priv
;
392 COMMAND_PARSE_NUMBER(u32
, CMD_ARGV
[0], new_value
);
393 if ((new_value
< 2) || (new_value
> 100))
395 "valid percentages are 2%% to 100%%");
397 etb
->trigger_percent
= (unsigned) new_value
;
400 command_print(CMD
, "%d percent of tracebuffer fills after trigger",
401 etb
->trigger_percent
);
/* Sub-command table for the 'etb' command group: 'config' (configuration
 * stage) and 'trigger_percent' (runtime). NOTE(review): garbled extraction —
 * the entry braces and the 'config' .name line are on missing lines; code
 * left byte-identical. */
406 static const struct command_registration etb_config_command_handlers
[] = {
408 /* NOTE: with ADIv5, ETBs are accessed using DAP operations,
409 * possibly over SWD, not through separate TAPs...
412 .handler
= handle_etb_config_command
,
413 .mode
= COMMAND_CONFIG
,
414 .help
= "Associate ETB with target and JTAG TAP.",
415 .usage
= "target tap",
418 .name
= "trigger_percent",
419 .handler
= handle_etb_trigger_percent_command
,
420 .mode
= COMMAND_EXEC
,
421 .help
= "Set percent of trace buffer to be filled "
422 "after the trigger occurs (2..100).",
423 .usage
= "[percent]",
425 COMMAND_REGISTRATION_DONE
/* Top-level 'etb' command group; chains to the sub-command table above.
 * NOTE(review): .name/.mode lines are missing from view; code left
 * byte-identical. */
427 static const struct command_registration etb_command_handlers
[] = {
431 .help
= "Embedded Trace Buffer command group",
432 .chain
= etb_config_command_handlers
,
435 COMMAND_REGISTRATION_DONE
/* Capture-driver init hook: bind the ETB to its ETM context, read the ETB's
 * RAM depth and width registers over JTAG to size the trace buffer, and
 * default trigger_percent to 50.
 * NOTE(review): garbled extraction — the return and any error handling for
 * jtag_execute_queue() are on missing lines; code left byte-identical. */
438 static int etb_init(struct etm_context
*etm_ctx
)
440 struct etb
*etb
= etm_ctx
->capture_driver_priv
;
442 etb
->etm_ctx
= etm_ctx
;
444 /* identify ETB RAM depth and width */
445 etb_read_reg(&etb
->reg_cache
->reg_list
[ETB_RAM_DEPTH
]);
446 etb_read_reg(&etb
->reg_cache
->reg_list
[ETB_RAM_WIDTH
]);
447 jtag_execute_queue();
449 etb
->ram_depth
= buf_get_u32(etb
->reg_cache
->reg_list
[ETB_RAM_DEPTH
].value
, 0, 32);
450 etb
->ram_width
= buf_get_u32(etb
->reg_cache
->reg_list
[ETB_RAM_WIDTH
].value
, 0, 32);
/* default: half the buffer fills after the trigger */
452 etb
->trigger_percent
= 50;
/* Capture-driver status hook: read the ETB control and status registers and
 * translate their bits into a trace_status_t bitmask — running/idle from
 * control bit 0, then OVERFLOWED (status bit 0, Full), TRIGGERED (bit 1),
 * and COMPLETED once AcqComp (bit 2) is set and DFEmpty (bit 3) goes high.
 * The result is cached in etm_ctx->capture_status and returned.
 * NOTE(review): garbled extraction — the DFEmpty polling loop body (the
 * re-read of the status register) and the return are on missing lines; as
 * visible, the `while (etb_timeout-- ...)` test against `== 0` afterwards
 * looks off-by-one (timeout leaves etb_timeout at -1, not 0) — confirm
 * against the original file. Code left byte-identical. */
457 static trace_status_t
etb_status(struct etm_context
*etm_ctx
)
459 struct etb
*etb
= etm_ctx
->capture_driver_priv
;
460 struct reg
*control
= &etb
->reg_cache
->reg_list
[ETB_CTRL
];
461 struct reg
*status
= &etb
->reg_cache
->reg_list
[ETB_STATUS
];
462 trace_status_t retval
= 0;
463 int etb_timeout
= 100;
465 etb
->etm_ctx
= etm_ctx
;
467 /* read control and status registers */
468 etb_read_reg(control
);
469 etb_read_reg(status
);
470 jtag_execute_queue();
472 /* See if it's (still) active */
473 retval
= buf_get_u32(control
->value
, 0, 1) ? TRACE_RUNNING
: TRACE_IDLE
;
475 /* check Full bit to identify wraparound/overflow */
476 if (buf_get_u32(status
->value
, 0, 1) == 1)
477 retval
|= TRACE_OVERFLOWED
;
479 /* check Triggered bit to identify trigger condition */
480 if (buf_get_u32(status
->value
, 1, 1) == 1)
481 retval
|= TRACE_TRIGGERED
;
483 /* check AcqComp to see if trigger counter dropped to zero */
484 if (buf_get_u32(status
->value
, 2, 1) == 1) {
485 /* wait for DFEmpty */
486 while (etb_timeout
-- && buf_get_u32(status
->value
, 3, 1) == 0)
489 if (etb_timeout
== 0)
490 LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
491 (unsigned) buf_get_u32(status
->value
, 0, 4));
493 if (!(etm_ctx
->capture_status
& TRACE_TRIGGERED
))
494 LOG_WARNING("ETB: trace complete without triggering?");
496 retval
|= TRACE_COMPLETED
;
499 /* NOTE: using a trigger is optional; and at least ETB11 has a mode
500 * where it can ignore the trigger counter.
503 /* update recorded state */
504 etm_ctx
->capture_status
= retval
;
/* Capture-driver read_trace hook: drain the ETB RAM and unpack it into
 * etm_ctx->trace_data. Steps: read status + write pointer; if the Full bit is
 * set the buffer wrapped, so the oldest frame is at the write pointer,
 * otherwise only frames [0, write pointer) are valid; position the read
 * pointer, burst-read into a temporary uint32_t array via etb_read_ram, then
 * demultiplex each 32-bit frame into 3 (4-bit port), 2 (8-bit port) or 1
 * (16-bit port) etmv1_trace_data entries — pipestat, packet and the
 * tracesync flag are packed at different bit offsets per port width, and a
 * TR pipestat is rewritten from the packet bits with TRIGGER_CYCLE flagged.
 * NOTE(review): heavily garbled extraction — first_frame/i/j declarations,
 * several closing braces, the 'else' branches, the `& 0x7` tails of two
 * STAT_TR rewrites, the j increment (presumably j += 3/2/1 per width), the
 * free(trace_data) and the return are all on missing lines; the malloc
 * results are used unchecked as visible. Code left byte-identical. */
509 static int etb_read_trace(struct etm_context
*etm_ctx
)
511 struct etb
*etb
= etm_ctx
->capture_driver_priv
;
513 int num_frames
= etb
->ram_depth
;
514 uint32_t *trace_data
= NULL
;
517 etb_read_reg(&etb
->reg_cache
->reg_list
[ETB_STATUS
]);
518 etb_read_reg(&etb
->reg_cache
->reg_list
[ETB_RAM_WRITE_POINTER
]);
519 jtag_execute_queue();
521 /* check if we overflowed, and adjust first frame of the trace accordingly
522 * if we didn't overflow, read only up to the frame that would be written next,
523 * i.e. don't read invalid entries
525 if (buf_get_u32(etb
->reg_cache
->reg_list
[ETB_STATUS
].value
, 0, 1))
526 first_frame
= buf_get_u32(etb
->reg_cache
->reg_list
[ETB_RAM_WRITE_POINTER
].value
,
530 num_frames
= buf_get_u32(etb
->reg_cache
->reg_list
[ETB_RAM_WRITE_POINTER
].value
,
534 etb_write_reg(&etb
->reg_cache
->reg_list
[ETB_RAM_READ_POINTER
], first_frame
);
536 /* read data into temporary array for unpacking */
537 trace_data
= malloc(sizeof(uint32_t) * num_frames
);
538 etb_read_ram(etb
, trace_data
, num_frames
);
540 if (etm_ctx
->trace_depth
> 0)
541 free(etm_ctx
->trace_data
);
543 if ((etm_ctx
->control
& ETM_PORT_WIDTH_MASK
) == ETM_PORT_4BIT
)
544 etm_ctx
->trace_depth
= num_frames
* 3;
545 else if ((etm_ctx
->control
& ETM_PORT_WIDTH_MASK
) == ETM_PORT_8BIT
)
546 etm_ctx
->trace_depth
= num_frames
* 2;
548 etm_ctx
->trace_depth
= num_frames
;
550 etm_ctx
->trace_data
= malloc(sizeof(struct etmv1_trace_data
) * etm_ctx
->trace_depth
);
552 for (i
= 0, j
= 0; i
< num_frames
; i
++) {
553 if ((etm_ctx
->control
& ETM_PORT_WIDTH_MASK
) == ETM_PORT_4BIT
) {
/* 4-bit port: 3 trace cycles packed per 32-bit frame */
555 etm_ctx
->trace_data
[j
].pipestat
= trace_data
[i
] & 0x7;
556 etm_ctx
->trace_data
[j
].packet
= (trace_data
[i
] & 0x78) >> 3;
557 etm_ctx
->trace_data
[j
].flags
= 0;
558 if ((trace_data
[i
] & 0x80) >> 7)
559 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRACESYNC_CYCLE
;
560 if (etm_ctx
->trace_data
[j
].pipestat
== STAT_TR
) {
561 etm_ctx
->trace_data
[j
].pipestat
= etm_ctx
->trace_data
[j
].packet
&
563 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRIGGER_CYCLE
;
566 /* trace word j + 1 */
567 etm_ctx
->trace_data
[j
+ 1].pipestat
= (trace_data
[i
] & 0x100) >> 8;
568 etm_ctx
->trace_data
[j
+ 1].packet
= (trace_data
[i
] & 0x7800) >> 11;
569 etm_ctx
->trace_data
[j
+ 1].flags
= 0;
570 if ((trace_data
[i
] & 0x8000) >> 15)
571 etm_ctx
->trace_data
[j
+ 1].flags
|= ETMV1_TRACESYNC_CYCLE
;
572 if (etm_ctx
->trace_data
[j
+ 1].pipestat
== STAT_TR
) {
573 etm_ctx
->trace_data
[j
+
574 1].pipestat
= etm_ctx
->trace_data
[j
+ 1].packet
& 0x7;
575 etm_ctx
->trace_data
[j
+ 1].flags
|= ETMV1_TRIGGER_CYCLE
;
578 /* trace word j + 2 */
579 etm_ctx
->trace_data
[j
+ 2].pipestat
= (trace_data
[i
] & 0x10000) >> 16;
580 etm_ctx
->trace_data
[j
+ 2].packet
= (trace_data
[i
] & 0x780000) >> 19;
581 etm_ctx
->trace_data
[j
+ 2].flags
= 0;
582 if ((trace_data
[i
] & 0x800000) >> 23)
583 etm_ctx
->trace_data
[j
+ 2].flags
|= ETMV1_TRACESYNC_CYCLE
;
584 if (etm_ctx
->trace_data
[j
+ 2].pipestat
== STAT_TR
) {
585 etm_ctx
->trace_data
[j
+
586 2].pipestat
= etm_ctx
->trace_data
[j
+ 2].packet
& 0x7;
587 etm_ctx
->trace_data
[j
+ 2].flags
|= ETMV1_TRIGGER_CYCLE
;
591 } else if ((etm_ctx
->control
& ETM_PORT_WIDTH_MASK
) == ETM_PORT_8BIT
) {
/* 8-bit port: 2 trace cycles packed per 32-bit frame */
593 etm_ctx
->trace_data
[j
].pipestat
= trace_data
[i
] & 0x7;
594 etm_ctx
->trace_data
[j
].packet
= (trace_data
[i
] & 0x7f8) >> 3;
595 etm_ctx
->trace_data
[j
].flags
= 0;
596 if ((trace_data
[i
] & 0x800) >> 11)
597 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRACESYNC_CYCLE
;
598 if (etm_ctx
->trace_data
[j
].pipestat
== STAT_TR
) {
599 etm_ctx
->trace_data
[j
].pipestat
= etm_ctx
->trace_data
[j
].packet
&
601 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRIGGER_CYCLE
;
604 /* trace word j + 1 */
605 etm_ctx
->trace_data
[j
+ 1].pipestat
= (trace_data
[i
] & 0x7000) >> 12;
606 etm_ctx
->trace_data
[j
+ 1].packet
= (trace_data
[i
] & 0x7f8000) >> 15;
607 etm_ctx
->trace_data
[j
+ 1].flags
= 0;
608 if ((trace_data
[i
] & 0x800000) >> 23)
609 etm_ctx
->trace_data
[j
+ 1].flags
|= ETMV1_TRACESYNC_CYCLE
;
610 if (etm_ctx
->trace_data
[j
+ 1].pipestat
== STAT_TR
) {
611 etm_ctx
->trace_data
[j
+
612 1].pipestat
= etm_ctx
->trace_data
[j
+ 1].packet
& 0x7;
613 etm_ctx
->trace_data
[j
+ 1].flags
|= ETMV1_TRIGGER_CYCLE
;
/* 16-bit port: 1 trace cycle per 32-bit frame */
619 etm_ctx
->trace_data
[j
].pipestat
= trace_data
[i
] & 0x7;
620 etm_ctx
->trace_data
[j
].packet
= (trace_data
[i
] & 0x7fff8) >> 3;
621 etm_ctx
->trace_data
[j
].flags
= 0;
622 if ((trace_data
[i
] & 0x80000) >> 19)
623 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRACESYNC_CYCLE
;
624 if (etm_ctx
->trace_data
[j
].pipestat
== STAT_TR
) {
625 etm_ctx
->trace_data
[j
].pipestat
= etm_ctx
->trace_data
[j
].packet
&
627 etm_ctx
->trace_data
[j
].flags
|= ETMV1_TRIGGER_CYCLE
;
/* Capture-driver start hook: validate the ETM port mode (demuxed requires an
 * 8-bit port and sets ETB control bit 1; muxed is rejected outright), program
 * the trigger counter from trigger_percent, zero the write pointer, enable
 * capture via ETB_CTRL bit 0, and mark the capture status as running.
 * NOTE(review): garbled extraction — closing braces and the final return are
 * on missing lines; code left byte-identical. */
639 static int etb_start_capture(struct etm_context
*etm_ctx
)
641 struct etb
*etb
= etm_ctx
->capture_driver_priv
;
/* bit 0 = capture enable */
642 uint32_t etb_ctrl_value
= 0x1;
643 uint32_t trigger_count
;
645 if ((etm_ctx
->control
& ETM_PORT_MODE_MASK
) == ETM_PORT_DEMUXED
) {
646 if ((etm_ctx
->control
& ETM_PORT_WIDTH_MASK
) != ETM_PORT_8BIT
) {
647 LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
648 return ERROR_ETM_PORTMODE_NOT_SUPPORTED
;
/* bit 1 = demultiplexed capture */
650 etb_ctrl_value
|= 0x2;
653 if ((etm_ctx
->control
& ETM_PORT_MODE_MASK
) == ETM_PORT_MUXED
) {
654 LOG_ERROR("ETB: can't run in multiplexed mode");
655 return ERROR_ETM_PORTMODE_NOT_SUPPORTED
;
/* frames to capture after the trigger event */
658 trigger_count
= (etb
->ram_depth
* etb
->trigger_percent
) / 100;
660 etb_write_reg(&etb
->reg_cache
->reg_list
[ETB_TRIGGER_COUNTER
], trigger_count
);
661 etb_write_reg(&etb
->reg_cache
->reg_list
[ETB_RAM_WRITE_POINTER
], 0x0);
662 etb_write_reg(&etb
->reg_cache
->reg_list
[ETB_CTRL
], etb_ctrl_value
);
663 jtag_execute_queue();
665 /* we're starting a new trace, initialize capture status */
666 etm_ctx
->capture_status
= TRACE_RUNNING
;
/* Capture-driver stop hook: clear ETB_CTRL (disables capture), flush the
 * JTAG queue, and clear only the RUNNING bit of the recorded capture status
 * so overflow/trigger flags survive for later inspection.
 * NOTE(review): garbled extraction — the return is on a missing line; code
 * left byte-identical. */
671 static int etb_stop_capture(struct etm_context
*etm_ctx
)
673 struct etb
*etb
= etm_ctx
->capture_driver_priv
;
674 struct reg
*etb_ctrl_reg
= &etb
->reg_cache
->reg_list
[ETB_CTRL
];
676 etb_write_reg(etb_ctrl_reg
, 0x0);
677 jtag_execute_queue();
679 /* trace stopped, just clear running flag, but preserve others */
680 etm_ctx
->capture_status
&= ~TRACE_RUNNING
;
685 struct etm_capture_driver etb_capture_driver
= {
687 .commands
= etb_command_handlers
,
689 .status
= etb_status
,
690 .start_capture
= etb_start_capture
,
691 .stop_capture
= etb_stop_capture
,
692 .read_trace
= etb_read_trace
,