Use (uint8_t *) for buf_(set|get)_u(32|64) instead of (void *)
src/target/etb.c

/***************************************************************************
 *   Copyright (C) 2007 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.           *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "arm.h"
#include "etm.h"
#include "etb.h"
#include "register.h"

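/* Names for the ETB registers exposed through the register cache built
 * below.  The cache index doubles as the 7-bit register address driven onto
 * the ETB's data scan chain (for example, index/address 0x4 is the RAM data
 * register, as used by etb_read_ram()).
 */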
static const char * const etb_reg_list[] = {
	"ETB_identification",
	"ETB_ram_depth",
	"ETB_ram_width",
	"ETB_status",
	"ETB_ram_data",
	"ETB_ram_read_pointer",
	"ETB_ram_write_pointer",
	"ETB_trigger_counter",
	"ETB_control",
};

static int etb_get_reg(struct reg *reg);

static int etb_set_instr(struct etb *etb, uint32_t new_instr)
{
	struct jtag_tap *tap;

	tap = etb->tap;
	if (tap == NULL)
		return ERROR_FAIL;

	if (buf_get_u32(tap->cur_instr, 0, tap->ir_length) != new_instr) {
		struct scan_field field;

		field.num_bits = tap->ir_length;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_instr);

		field.in_value = NULL;

		jtag_add_ir_scan(tap, &field, TAP_IDLE);

		free(t);
	}

	return ERROR_OK;
}

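/* Select the ETB scan chain given by the 5-bit new_scan_chain value.  The
 * current selection is cached in etb->cur_scan_chain so repeated selections
 * of the same chain don't generate extra JTAG traffic; this driver only ever
 * selects chain 0x0, which carries the register accesses below.
 */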
static int etb_scann(struct etb *etb, uint32_t new_scan_chain)
{
	if (etb->cur_scan_chain != new_scan_chain) {
		struct scan_field field;

		field.num_bits = 5;
		void *t = calloc(DIV_ROUND_UP(field.num_bits, 8), 1);
		field.out_value = t;
		buf_set_u32(t, 0, field.num_bits, new_scan_chain);

		field.in_value = NULL;

		/* select the scan chain via the SCAN_N instruction */
		etb_set_instr(etb, 0x2);
		jtag_add_dr_scan(etb->tap, 1, &field, TAP_IDLE);

		etb->cur_scan_chain = new_scan_chain;

		free(t);
	}

	return ERROR_OK;
}

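/* All ETB register accesses below use the same three-field DR layout on
 * scan chain 0: a 32-bit data field, a 7-bit register address field, and a
 * single read/write bit (0 = read, 1 = write), shifted while instruction
 * 0xc is loaded into the ETB TAP.
 */
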
static int etb_read_reg_w_check(struct reg *, uint8_t *, uint8_t *);
static int etb_set_reg_w_exec(struct reg *, uint8_t *);

static int etb_read_reg(struct reg *reg)
{
	return etb_read_reg_w_check(reg, NULL, NULL);
}

static int etb_get_reg(struct reg *reg)
{
	int retval;

	retval = etb_read_reg(reg);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register read");
		return retval;
	}

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB register read failed");
		return retval;
	}

	return ERROR_OK;
}

static const struct reg_arch_type etb_reg_type = {
	.get = etb_get_reg,
	.set = etb_set_reg_w_exec,
};

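/* Build a register cache with one 32-bit entry per ETB register; entries
 * start out invalid and are fetched on demand through etb_get_reg().
 */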
struct reg_cache *etb_build_reg_cache(struct etb *etb)
{
	struct reg_cache *reg_cache = malloc(sizeof(struct reg_cache));
	struct reg *reg_list = NULL;
	struct etb_reg *arch_info = NULL;
	int num_regs = 9;
	int i;

	/* the actual registers are kept in two arrays */
	reg_list = calloc(num_regs, sizeof(struct reg));
	arch_info = calloc(num_regs, sizeof(struct etb_reg));

	/* fill in values for the reg cache */
	reg_cache->name = "etb registers";
	reg_cache->next = NULL;
	reg_cache->reg_list = reg_list;
	reg_cache->num_regs = num_regs;

	/* set up registers */
	for (i = 0; i < num_regs; i++) {
		reg_list[i].name = etb_reg_list[i];
		reg_list[i].size = 32;
		reg_list[i].dirty = 0;
		reg_list[i].valid = 0;
		reg_list[i].value = calloc(1, 4);
		reg_list[i].arch_info = &arch_info[i];
		reg_list[i].type = &etb_reg_type;
		arch_info[i].addr = i;
		arch_info[i].etb = etb;
	}

	return reg_cache;
}

static void etb_getbuf(jtag_callback_data_t arg)
{
	uint8_t *in = (uint8_t *)arg;

	*((uint32_t *)arg) = buf_get_u32(in, 0, 32);
}

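/* Read num_frames words from the ETB RAM data register (address 0x4).
 * The reads are pipelined: each DR scan returns the data addressed by the
 * previous scan, so an initial scan only sets up the address, and the scan
 * for the last frame switches the address back to 0x0 so the data register
 * isn't read one extra time.
 */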
static int etb_read_ram(struct etb *etb, uint32_t *data, int num_frames)
{
	struct scan_field fields[3];
	int i;

	etb_scann(etb, 0x0);
	etb_set_instr(etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = NULL;
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, 4);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

	for (i = 0; i < num_frames; i++) {
		/* ensure nR/W remains set to read */
		buf_set_u32(&temp2, 0, 1, 0);

		/* address remains set to 0x4 (RAM data) until we read the last frame */
		if (i < num_frames - 1)
			buf_set_u32(&temp1, 0, 7, 4);
		else
			buf_set_u32(&temp1, 0, 7, 0);

		fields[0].in_value = (uint8_t *)(data + i);
		jtag_add_dr_scan(etb->tap, 3, fields, TAP_IDLE);

		jtag_add_callback(etb_getbuf, (jtag_callback_data_t)(data + i));
	}

	jtag_execute_queue();

	return ERROR_OK;
}

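/* Queue a read of a single ETB register; if check_value/check_mask are
 * non-NULL, the value captured on the second scan is verified against them
 * when the JTAG queue is executed.
 */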
static int etb_read_reg_w_check(struct reg *reg,
	uint8_t *check_value, uint8_t *check_mask)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i", (int)(etb_reg->addr));

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	fields[0].out_value = reg->value;
	fields[0].in_value = NULL;
	fields[0].check_value = NULL;
	fields[0].check_mask = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;
	fields[1].check_value = NULL;
	fields[1].check_mask = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 0);
	fields[2].in_value = NULL;
	fields[2].check_value = NULL;
	fields[2].check_mask = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	/* read the identification register in the second run, to make sure we
	 * don't read the ETB data register twice, skipping every second entry
	 */
	buf_set_u32(&temp1, 0, 7, 0x0);
	fields[0].in_value = reg->value;
	fields[0].check_value = check_value;
	fields[0].check_mask = check_mask;

	jtag_add_dr_scan_check(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}

static int etb_write_reg(struct reg *, uint32_t);

static int etb_set_reg(struct reg *reg, uint32_t value)
{
	int retval;

	retval = etb_write_reg(reg, value);
	if (retval != ERROR_OK) {
		LOG_ERROR("BUG: error scheduling ETB register write");
		return retval;
	}

	buf_set_u32(reg->value, 0, reg->size, value);
	reg->valid = 1;
	reg->dirty = 0;

	return ERROR_OK;
}

static int etb_set_reg_w_exec(struct reg *reg, uint8_t *buf)
{
	int retval;

	etb_set_reg(reg, buf_get_u32(buf, 0, reg->size));

	retval = jtag_execute_queue();
	if (retval != ERROR_OK) {
		LOG_ERROR("ETB: register write failed");
		return retval;
	}

	return ERROR_OK;
}

static int etb_write_reg(struct reg *reg, uint32_t value)
{
	struct etb_reg *etb_reg = reg->arch_info;
	uint8_t reg_addr = etb_reg->addr & 0x7f;
	struct scan_field fields[3];

	LOG_DEBUG("%i: 0x%8.8" PRIx32 "", (int)(etb_reg->addr), value);

	etb_scann(etb_reg->etb, 0x0);
	etb_set_instr(etb_reg->etb, 0xc);

	fields[0].num_bits = 32;
	uint8_t temp0[4];
	fields[0].out_value = temp0;
	buf_set_u32(temp0, 0, 32, value);
	fields[0].in_value = NULL;

	fields[1].num_bits = 7;
	uint8_t temp1;
	fields[1].out_value = &temp1;
	buf_set_u32(&temp1, 0, 7, reg_addr);
	fields[1].in_value = NULL;

	fields[2].num_bits = 1;
	uint8_t temp2;
	fields[2].out_value = &temp2;
	buf_set_u32(&temp2, 0, 1, 1);
	fields[2].in_value = NULL;

	jtag_add_dr_scan(etb_reg->etb->tap, 3, fields, TAP_IDLE);

	return ERROR_OK;
}

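/* "etb config <target> <tap>": associate an ETB (on its own TAP) with the
 * ETM of the given target.  Only the minimal state is filled in here; the
 * RAM depth and width are probed later in etb_init().
 */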
COMMAND_HANDLER(handle_etb_config_command)
{
	struct target *target;
	struct jtag_tap *tap;
	struct arm *arm;

	if (CMD_ARGC != 2)
		return ERROR_COMMAND_SYNTAX_ERROR;

	target = get_target(CMD_ARGV[0]);

	if (!target) {
		LOG_ERROR("ETB: target '%s' not defined", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: '%s' isn't an ARM", CMD_ARGV[0]);
		return ERROR_FAIL;
	}

	tap = jtag_tap_by_string(CMD_ARGV[1]);
	if (tap == NULL) {
		command_print(CMD_CTX, "ETB: TAP %s does not exist", CMD_ARGV[1]);
		return ERROR_FAIL;
	}

	if (arm->etm) {
		struct etb *etb = malloc(sizeof(struct etb));

		arm->etm->capture_driver_priv = etb;

		etb->tap = tap;
		etb->cur_scan_chain = 0xffffffff;
		etb->reg_cache = NULL;
		etb->ram_width = 0;
		etb->ram_depth = 0;
	} else {
		LOG_ERROR("ETM: target has no ETM defined, ETB left unconfigured");
		return ERROR_FAIL;
	}

	return ERROR_OK;
}

COMMAND_HANDLER(handle_etb_trigger_percent_command)
{
	struct target *target;
	struct arm *arm;
	struct etm_context *etm;
	struct etb *etb;

	target = get_current_target(CMD_CTX);
	arm = target_to_arm(target);
	if (!is_arm(arm)) {
		command_print(CMD_CTX, "ETB: current target isn't an ARM");
		return ERROR_FAIL;
	}

	etm = arm->etm;
	if (!etm) {
		command_print(CMD_CTX, "ETB: target has no ETM configured");
		return ERROR_FAIL;
	}
	if (etm->capture_driver != &etb_capture_driver) {
		command_print(CMD_CTX, "ETB: target not using ETB");
		return ERROR_FAIL;
	}
	etb = arm->etm->capture_driver_priv;

	if (CMD_ARGC > 0) {
		uint32_t new_value;

		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], new_value);
		if ((new_value < 2) || (new_value > 100))
			command_print(CMD_CTX,
				"valid percentages are 2%% to 100%%");
		else
			etb->trigger_percent = (unsigned) new_value;
	}

	command_print(CMD_CTX, "%d percent of tracebuffer fills after trigger",
		etb->trigger_percent);

	return ERROR_OK;
}

static const struct command_registration etb_config_command_handlers[] = {
	{
		/* NOTE: with ADIv5, ETBs are accessed using DAP operations,
		 * possibly over SWD, not through separate TAPs...
		 */
		.name = "config",
		.handler = handle_etb_config_command,
		.mode = COMMAND_CONFIG,
		.help = "Associate ETB with target and JTAG TAP.",
		.usage = "target tap",
	},
	{
		.name = "trigger_percent",
		.handler = handle_etb_trigger_percent_command,
		.mode = COMMAND_EXEC,
		.help = "Set percent of trace buffer to be filled "
			"after the trigger occurs (2..100).",
		.usage = "[percent]",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration etb_command_handlers[] = {
	{
		.name = "etb",
		.mode = COMMAND_ANY,
		.help = "Embedded Trace Buffer command group",
		.chain = etb_config_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};

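/* Typical use from a configuration script (the target and TAP names here
 * are examples only):
 *
 *   etb config arm926ejs.cpu arm926ejs.etb
 *   etb trigger_percent 25
 */
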
static int etb_init(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;

	etb->etm_ctx = etm_ctx;

	/* identify ETB RAM depth and width */
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_DEPTH]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WIDTH]);
	jtag_execute_queue();

	etb->ram_depth = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_DEPTH].value, 0, 32);
	etb->ram_width = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WIDTH].value, 0, 32);

	etb->trigger_percent = 50;

	return ERROR_OK;
}

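/* Poll the ETB control and status registers and translate them into the
 * generic trace_status_t flags.  Status bits used below: bit 0 = Full,
 * bit 1 = Triggered, bit 2 = AcqComp, bit 3 = DFEmpty.
 */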
static trace_status_t etb_status(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *control = &etb->reg_cache->reg_list[ETB_CTRL];
	struct reg *status = &etb->reg_cache->reg_list[ETB_STATUS];
	trace_status_t retval = 0;
	int etb_timeout = 100;

	etb->etm_ctx = etm_ctx;

	/* read control and status registers */
	etb_read_reg(control);
	etb_read_reg(status);
	jtag_execute_queue();

	/* See if it's (still) active */
	retval = buf_get_u32(control->value, 0, 1) ? TRACE_RUNNING : TRACE_IDLE;

	/* check Full bit to identify wraparound/overflow */
	if (buf_get_u32(status->value, 0, 1) == 1)
		retval |= TRACE_OVERFLOWED;

	/* check Triggered bit to identify trigger condition */
	if (buf_get_u32(status->value, 1, 1) == 1)
		retval |= TRACE_TRIGGERED;

	/* check AcqComp to see if trigger counter dropped to zero */
	if (buf_get_u32(status->value, 2, 1) == 1) {
		/* wait for DFEmpty */
		while (etb_timeout-- && buf_get_u32(status->value, 3, 1) == 0)
			etb_get_reg(status);

		if (etb_timeout == 0)
			LOG_ERROR("ETB: DFEmpty won't go high, status 0x%02x",
				(unsigned) buf_get_u32(status->value, 0, 4));

		if (!(etm_ctx->capture_status & TRACE_TRIGGERED))
			LOG_WARNING("ETB: trace complete without triggering?");

		retval |= TRACE_COMPLETED;
	}

	/* NOTE: using a trigger is optional; and at least ETB11 has a mode
	 * where it can ignore the trigger counter.
	 */

	/* update recorded state */
	etm_ctx->capture_status = retval;

	return retval;
}

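/* Pull the captured frames out of the ETB RAM and unpack them into ETMv1
 * trace records.  Depending on the configured ETM port width, one 32-bit
 * ETB frame holds three trace cycles (4-bit port), two (8-bit port), or one
 * (16-bit port); each cycle contributes a pipestat, a packet, and a
 * tracesync flag.
 */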
static int etb_read_trace(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	int first_frame = 0;
	int num_frames = etb->ram_depth;
	uint32_t *trace_data = NULL;
	int i, j;

	etb_read_reg(&etb->reg_cache->reg_list[ETB_STATUS]);
	etb_read_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER]);
	jtag_execute_queue();

	/* check if we overflowed, and adjust first frame of the trace accordingly
	 * if we didn't overflow, read only up to the frame that would be written next,
	 * i.e. don't read invalid entries
	 */
	if (buf_get_u32(etb->reg_cache->reg_list[ETB_STATUS].value, 0, 1))
		first_frame = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);
	else
		num_frames = buf_get_u32(etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER].value,
				0, 32);

	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_READ_POINTER], first_frame);

	/* read data into temporary array for unpacking */
	trace_data = malloc(sizeof(uint32_t) * num_frames);
	etb_read_ram(etb, trace_data, num_frames);

	if (etm_ctx->trace_depth > 0)
		free(etm_ctx->trace_data);

	if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT)
		etm_ctx->trace_depth = num_frames * 3;
	else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT)
		etm_ctx->trace_depth = num_frames * 2;
	else
		etm_ctx->trace_depth = num_frames;

	etm_ctx->trace_data = malloc(sizeof(struct etmv1_trace_data) * etm_ctx->trace_depth);

	for (i = 0, j = 0; i < num_frames; i++) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_4BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x78) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80) >> 7)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x100) >> 8;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7800) >> 11;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x8000) >> 15)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 2 */
			etm_ctx->trace_data[j + 2].pipestat = (trace_data[i] & 0x10000) >> 16;
			etm_ctx->trace_data[j + 2].packet = (trace_data[i] & 0x780000) >> 19;
			etm_ctx->trace_data[j + 2].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 2].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 2].pipestat = etm_ctx->trace_data[j + 2].packet & 0x7;
				etm_ctx->trace_data[j + 2].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 3;
		} else if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) == ETM_PORT_8BIT) {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7f8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x800) >> 11)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			/* trace word j + 1 */
			etm_ctx->trace_data[j + 1].pipestat = (trace_data[i] & 0x7000) >> 12;
			etm_ctx->trace_data[j + 1].packet = (trace_data[i] & 0x7f8000) >> 15;
			etm_ctx->trace_data[j + 1].flags = 0;
			if ((trace_data[i] & 0x800000) >> 23)
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j + 1].pipestat == STAT_TR) {
				etm_ctx->trace_data[j + 1].pipestat = etm_ctx->trace_data[j + 1].packet & 0x7;
				etm_ctx->trace_data[j + 1].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 2;
		} else {
			/* trace word j */
			etm_ctx->trace_data[j].pipestat = trace_data[i] & 0x7;
			etm_ctx->trace_data[j].packet = (trace_data[i] & 0x7fff8) >> 3;
			etm_ctx->trace_data[j].flags = 0;
			if ((trace_data[i] & 0x80000) >> 19)
				etm_ctx->trace_data[j].flags |= ETMV1_TRACESYNC_CYCLE;
			if (etm_ctx->trace_data[j].pipestat == STAT_TR) {
				etm_ctx->trace_data[j].pipestat = etm_ctx->trace_data[j].packet & 0x7;
				etm_ctx->trace_data[j].flags |= ETMV1_TRIGGER_CYCLE;
			}

			j += 1;
		}
	}

	free(trace_data);

	return ERROR_OK;
}

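/* Arm the ETB: program the trigger counter from trigger_percent, reset the
 * write pointer, and set the control register.  Control bit 0 enables trace
 * capture; bit 1 selects demultiplexed mode, which is only valid with an
 * 8-bit ETM port.
 */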
static int etb_start_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	uint32_t etb_ctrl_value = 0x1;
	uint32_t trigger_count;

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_DEMUXED) {
		if ((etm_ctx->control & ETM_PORT_WIDTH_MASK) != ETM_PORT_8BIT) {
			LOG_ERROR("ETB can't run in demultiplexed mode with a 4 or 16 bit port");
			return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
		}
		etb_ctrl_value |= 0x2;
	}

	if ((etm_ctx->control & ETM_PORT_MODE_MASK) == ETM_PORT_MUXED) {
		LOG_ERROR("ETB: can't run in multiplexed mode");
		return ERROR_ETM_PORTMODE_NOT_SUPPORTED;
	}

	trigger_count = (etb->ram_depth * etb->trigger_percent) / 100;

	etb_write_reg(&etb->reg_cache->reg_list[ETB_TRIGGER_COUNTER], trigger_count);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_RAM_WRITE_POINTER], 0x0);
	etb_write_reg(&etb->reg_cache->reg_list[ETB_CTRL], etb_ctrl_value);
	jtag_execute_queue();

	/* we're starting a new trace, initialize capture status */
	etm_ctx->capture_status = TRACE_RUNNING;

	return ERROR_OK;
}

static int etb_stop_capture(struct etm_context *etm_ctx)
{
	struct etb *etb = etm_ctx->capture_driver_priv;
	struct reg *etb_ctrl_reg = &etb->reg_cache->reg_list[ETB_CTRL];

	etb_write_reg(etb_ctrl_reg, 0x0);
	jtag_execute_queue();

	/* trace stopped, just clear running flag, but preserve others */
	etm_ctx->capture_status &= ~TRACE_RUNNING;

	return ERROR_OK;
}

struct etm_capture_driver etb_capture_driver = {
	.name = "etb",
	.commands = etb_command_handlers,
	.init = etb_init,
	.status = etb_status,
	.start_capture = etb_start_capture,
	.stop_capture = etb_stop_capture,
	.read_trace = etb_read_trace,
};