src/target/etb.c

/***************************************************************************
 *   Copyright (C) 2007 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the          *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program. If not, see <http://www.gnu.org/licenses/>. *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "etm.h"
#include "etb.h"
#include "register.h"

static const char * const etb_reg_list[] = {
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};

static int etb_get_reg(struct reg *reg);

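/* ETB register addresses used by this driver mirror the index into
 * etb_reg_list[] above (etb_build_reg_cache() sets arch_info[i].addr = i):
 * 0 = identification, 1 = RAM depth, 2 = RAM width, 3 = status,
 * 4 = RAM data, 5 = RAM read pointer, 6 = RAM write pointer,
 * 7 = trigger counter, 8 = control.  The ETB_* index constants used
 * below are presumably defined in etb.h.
 */
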
static int etb_set_instr(struct etb *etb, uint32_t new_instr)
{
	struct jtag_tap *tap;

	tap = etb->tap;
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;

		field.num_bits = tap->ir_length;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_instr);

		field.in_value = NULL;

		jtag_add_ir_scan(tap, &field, TAP_IDLE);

		free(t);
	}

	return ERROR_OK;
}

static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
{
	if (etb->cur_scan_chain != new_scan_chain) {
		struct scan_field field;

		field.num_bits = 5;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_scan_chain);

		field.in_value = NULL;

		/* select the scan chain via the chain-select (SCAN_N) instruction */
		etb_set_instr(etb, 0x2);
		jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);

		etb->cur_scan_chain = new_scan_chain;

		free(t);
	}

	return ERROR_OK;
}

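/* Register accesses below all follow the same two-step pattern: first
 * select ETB scan chain 0 with the chain-select instruction 0x2
 * (etb_scann), then switch to instruction 0xc to access the selected
 * chain (etb_set_instr), and shift a 40-bit data register made up of
 * 32 data bits, a 7-bit register address and one read/write bit
 * (0 = read, 1 = write).  This layout is inferred from the scan_field
 * setup in the functions below; see the ETB TRM for the authoritative
 * instruction and scan chain definitions.
 */
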
static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
static int etb_set_reg_w_exec(struct reg *, uint8_t *);

static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}

static int etb_get_reg(struct reg *reg)
{
	int retval;

	retval = etb_read_reg(reg);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register read");
		return retval;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB register read failed");
		return retval;
	}

	return ERROR_OK;
}

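/* Note the split between scheduling and execution: etb_read_reg() and
 * etb_write_reg() only queue JTAG scans, and nothing reaches the
 * hardware until jtag_execute_queue() runs.  Callers exploit this to
 * batch several register accesses into a single queue flush.
 */
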
static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};

struct reg_cache *etb_build_reg_cache(struct etb *etb)
{
	struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = NULL;
	struct etb_reg *arch_info = NULL;
	int num_regs = 9;
	int i;

	/* the actual registers are kept in two arrays */
	reg_list = calloc(num_regs, sizeof(struct reg));
	arch_info = calloc(num_regs, sizeof(struct etb_reg));

	/* fill in values for the reg cache */
	reg_cache->name = "etb registers";
	reg_cache->next = NULL;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = num_regs;

	/* set up registers */
	for (i = 0; i < num_regs; i++) {
		reg_list[i].name = etb_reg_list[i];
		reg_list[i].size = 32;
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].type = &etb_reg_type;
		arch_info[i].addr = i;
		arch_info[i].etb = etb;
	}

	return reg_cache;
}

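/* All registers in the cache start out marked invalid; values are
 * fetched on demand through etb_reg_type.get (etb_get_reg above), which
 * both schedules the read and flushes the JTAG queue.
 */
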
static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}

static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
	struct scan_field fields[3];
	int i;

	etb_scann(etb, 0x0);
	etb_set_instr(etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, 4);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

	for (i = 0; i < num_frames; i++) {
		/* ensure nR/W remains set to read */
		buf_set_u32(&temp2, 0, 1, 0);

		/* address remains set to 0x4 (RAM data) until we read the last frame */
		if (i < num_frames - 1)
			buf_set_u32(&temp1, 0, 7, 4);
		else
			buf_set_u32(&temp1, 0, 7, 0);

		fields[0].in_value = (uint8_t *)(data + i);
		jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

		jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
	}

	jtag_execute_queue();

	return ERROR_OK;
}

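/* The RAM read above is pipelined: each 40-bit scan returns the data of
 * the previously addressed access while setting up the next one.  That
 * is why one scan addressing the RAM data register (address 4) is queued
 * before the loop, and why the last loop iteration switches the address
 * to the identification register; presumably reading the data register
 * advances the RAM read pointer, so the final scan must not trigger one
 * more data access than was asked for.
 */
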
static int etb_read_reg_w_check(struct reg *reg,
	uint8_t *check_value, uint8_t *check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}

static int etb_write_reg(struct reg *, uint32_t);

static int etb_set_reg(struct reg *reg, uint32_t value)
{
	int retval;

	retval = etb_write_reg(reg, value);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register write");
		return retval;
	}

	buf_set_u32(reg->value, 0, reg->size, value);
	reg->valid = 1;
	reg->dirty = 0;

	return ERROR_OK;
}

static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
{
	int retval;

	etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB: register write failed");
		return retval;
	}

	return ERROR_OK;
}

static int etb_write_reg(struct reg *reg, uint32_t value)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	uint8_t temp0[4];
	fields[0].out_value = temp0;
	buf_set_u32(temp0, 0, 32, value);
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 1);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}

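/* Writes use the same 40-bit shift as reads, with the trailing nR/W bit
 * set to 1.  As with the read path, the scan is only queued here and is
 * performed by a later jtag_execute_queue() call.
 */
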
COMMAND_HANDLER(handle_etb_config_command)
{
	struct target *target;
	struct jtag_tap *tap;
	struct arm *arm;

	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	target = get_target(CMD_ARGV[0]);

	if (!target) {
		LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	tap = jtag_tap_by_string(CMD_ARGV[1]);
	if (tap == NULL) {
		command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
		return ERROR_FAIL;
	}

	if (arm->etm) {
		struct etb *etb = malloc(sizeof(struct etb));

		arm->etm->capture_driver_priv = etb;

		etb->tap = tap;
		etb->cur_scan_chain = 0xffffffff;
		etb->reg_cache = NULL;
		etb->ram_width = 0;
		etb->ram_depth = 0;
	} else {
		LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}

COMMAND_HANDLER(handle_etb_trigger_percent_command)
{
	struct target *target;
	struct arm *arm;
	struct etm_context *etm;
	struct etb *etb;

	target = get_current_target(CMD_CTX);
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: current target isn't an ARM");
		return ERROR_FAIL;
	}

	etm = arm->etm;
	if (!etm) {
		command_print(CMD_CTX, "ETB: target has no ETM configured");
		return ERROR_FAIL;
	}
	if (etm->capture_driver != &etb_capture_driver) {
		command_print(CMD_CTX, "ETB: target not using ETB");
		return ERROR_FAIL;
	}
	etb = arm->etm->capture_driver_priv;

	if (CMD_ARGC > 0) {
		uint32_t new_value;

		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
		if ((new_value < 2) || (new_value > 100))
			command_print(CMD_CTX,
				"valid percentages are 2%% to 100%%");
		else
			etb->trigger_percent = (unsigned) new_value;
	}

	command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
		etb->trigger_percent);

	return ERROR_OK;
}

static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration etb_command_handlers[] = {
	{
		.name = "etb",
		.mode = COMMAND_ANY,
		.help = "Embedded Trace Buffer command group",
		.chain = etb_config_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

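/* Typical use from an OpenOCD configuration script (the target and TAP
 * names below are placeholders, not taken from this file):
 *
 *   etb config mychip.cpu mychip.etb
 *   etb trigger_percent 30
 *
 * "etb config" is a COMMAND_CONFIG-stage command and requires the ETM to
 * be configured for the target first; "etb trigger_percent" is an
 * EXEC-mode command and reports the current setting when given no
 * argument.
 */
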
static int etb_init(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;

	etb->etm_ctx = etm_ctx;

	/* identify ETB RAM depth and width */
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
	jtag_execute_queue();

	etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
	etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);

	etb->trigger_percent = 50;

	return ERROR_OK;
}

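/* Status register bits examined by etb_status() below: bit 0 = Full
 * (the RAM has wrapped around), bit 1 = Triggered, bit 2 = AcqComp
 * (trigger counter reached zero), bit 3 = DFEmpty.
 */
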
static trace_status_t etb_status(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
	struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
	trace_status_t retval = 0;
	int etb_timeout = 100;

	etb->etm_ctx = etm_ctx;

	/* read control and status registers */
	etb_read_reg(control);
	etb_read_reg(status);
	jtag_execute_queue();

	/* See if it's (still) active */
	retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;

	/* check Full bit to identify wraparound/overflow */
	if (buf_get_u32(status->value, 0, 1) == 1)
		retval |= TRACE_OVERFLOWED;

	/* check Triggered bit to identify trigger condition */
	if (buf_get_u32(status->value, 1, 1) == 1)
		retval |= TRACE_TRIGGERED;

	/* check AcqComp to see if trigger counter dropped to zero */
	if (buf_get_u32(status->value, 2, 1) == 1) {
		/* wait for DFEmpty */
		while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
			etb_get_reg(status);

		/* the post-decrement leaves the counter at -1 on timeout */
		if (etb_timeout < 0)
			LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
				(unsigned) buf_get_u32(status->value, 0, 4));

		if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
			LOG_WARNING("ETB: trace complete without triggering?");

		retval |= TRACE_COMPLETED;
	}

	/* NOTE: using a trigger is optional; and at least ETB11 has a mode
	 * where it can ignore the trigger counter.
	 */

	/* update recorded state */
	etm_ctx->capture_status = retval;

	return retval;
}

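/* etb_read_trace() below unpacks each 32-bit ETB frame into ETMv1 trace
 * cycles according to the configured ETM port width: a 4-bit port packs
 * three cycles per frame, an 8-bit port two, and any other width one.
 * Each cycle carries a 3-bit pipestat, a packet field and a trace-sync
 * flag, extracted by the bit masks in the loop; a pipestat of STAT_TR
 * marks a trigger cycle, with the real pipestat carried in the packet
 * bits.
 */
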
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);
	else
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	if (etm_ctx->trace_depth > 0)
		free(etm_ctx->trace_data);

	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		} else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		} else {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}

static int etb_start_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	uint32_t etb_ctrl_value = 0x1;
	uint32_t trigger_count;

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT) {
			LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
			return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
		}
		etb_ctrl_value |= 0x2;
	}

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
		LOG_ERROR("ETB: can't run in multiplexed mode");
		return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
	}

	trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;

	etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
	jtag_execute_queue();

	/* we're starting a new trace, initialize capture status */
	etm_ctx->capture_status = TRACE_RUNNING;

	return ERROR_OK;
}

static int etb_stop_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];

	etb_write_reg(etb_ctrl_reg, 0x0);
	jtag_execute_queue();

	/* trace stopped, just clear running flag, but preserve others */
	etm_ctx->capture_status &= ~TRACE_RUNNING;

	return ERROR_OK;
}

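/* Entry points used by the ETM layer through the vtable below; the
 * expected flow is roughly init -> start_capture -> status (polled)
 * -> stop_capture -> read_trace, but the exact sequencing is owned by
 * etm.c, so treat this ordering as a sketch rather than a contract.
 */
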
struct etm_capture_driver etb_capture_driver = {
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};